mirror of
https://github.com/torvalds/linux.git
synced 2025-12-01 07:26:02 +07:00
Merge tag 'mm-stable-2022-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:
"Most of the MM queue. A few things are still pending.
Liam's maple tree rework didn't make it. This has resulted in a few
other minor patch series being held over for next time.
Multi-gen LRU still isn't merged as we were waiting for maple tree to
stabilize. The current plan is to merge MGLRU into -mm soon and to
later reintroduce maple tree, with a view to hopefully getting both
into 6.1-rc1.
Summary:
- The usual batches of cleanups from Baoquan He, Muchun Song, Miaohe
Lin, Yang Shi, Anshuman Khandual and Mike Rapoport
- Some kmemleak fixes from Patrick Wang and Waiman Long
- DAMON updates from SeongJae Park
- memcg debug/visibility work from Roman Gushchin
- vmalloc speedup from Uladzislau Rezki
- more folio conversion work from Matthew Wilcox
- enhancements for coherent device memory mapping from Alex Sierra
- addition of shared pages tracking and CoW support for fsdax, from
Shiyang Ruan
- hugetlb optimizations from Mike Kravetz
- Mel Gorman has contributed some pagealloc changes to improve
latency and realtime behaviour.
- mprotect soft-dirty checking has been improved by Peter Xu
- Many other singleton patches all over the place"
[ XFS merge from hell as per Darrick Wong in
https://lore.kernel.org/all/YshKnxb4VwXycPO8@magnolia/ ]
* tag 'mm-stable-2022-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (282 commits)
tools/testing/selftests/vm/hmm-tests.c: fix build
mm: Kconfig: fix typo
mm: memory-failure: convert to pr_fmt()
mm: use is_zone_movable_page() helper
hugetlbfs: fix inaccurate comment in hugetlbfs_statfs()
hugetlbfs: cleanup some comments in inode.c
hugetlbfs: remove unneeded header file
hugetlbfs: remove unneeded hugetlbfs_ops forward declaration
hugetlbfs: use helper macro SZ_1{K,M}
mm: cleanup is_highmem()
mm/hmm: add a test for cross device private faults
selftests: add soft-dirty into run_vmtests.sh
selftests: soft-dirty: add test for mprotect
mm/mprotect: fix soft-dirty check in can_change_pte_writable()
mm: memcontrol: fix potential oom_lock recursion deadlock
mm/gup.c: fix formatting in check_and_migrate_movable_page()
xfs: fail dax mount if reflink is enabled on a partition
mm/memcontrol.c: remove the redundant updating of stats_flush_threshold
userfaultfd: don't fail on unrecognized features
hugetlb_cgroup: fix wrong hugetlb cgroup numa stat
...
This commit is contained in:
@@ -148,15 +148,21 @@ again:
|
||||
if (is_writable_device_private_entry(entry))
|
||||
mpfn |= MIGRATE_PFN_WRITE;
|
||||
} else {
|
||||
if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
|
||||
goto next;
|
||||
pfn = pte_pfn(pte);
|
||||
if (is_zero_pfn(pfn)) {
|
||||
if (is_zero_pfn(pfn) &&
|
||||
(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
|
||||
mpfn = MIGRATE_PFN_MIGRATE;
|
||||
migrate->cpages++;
|
||||
goto next;
|
||||
}
|
||||
page = vm_normal_page(migrate->vma, addr, pte);
|
||||
if (page && !is_zone_device_page(page) &&
|
||||
!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
|
||||
goto next;
|
||||
else if (page && is_device_coherent_page(page) &&
|
||||
(!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
|
||||
page->pgmap->owner != migrate->pgmap_owner))
|
||||
goto next;
|
||||
mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
|
||||
mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
|
||||
}
|
||||
@@ -518,7 +524,7 @@ EXPORT_SYMBOL(migrate_vma_setup);
|
||||
* handle_pte_fault()
|
||||
* do_anonymous_page()
|
||||
* to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
|
||||
* private page.
|
||||
* private or coherent page.
|
||||
*/
|
||||
static void migrate_vma_insert_page(struct migrate_vma *migrate,
|
||||
unsigned long addr,
|
||||
@@ -594,11 +600,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
|
||||
page_to_pfn(page));
|
||||
entry = swp_entry_to_pte(swp_entry);
|
||||
} else {
|
||||
/*
|
||||
* For now we only support migrating to un-addressable device
|
||||
* memory.
|
||||
*/
|
||||
if (is_zone_device_page(page)) {
|
||||
if (is_zone_device_page(page) &&
|
||||
!is_device_coherent_page(page)) {
|
||||
pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
|
||||
goto abort;
|
||||
}
|
||||
@@ -683,6 +686,12 @@ void migrate_vma_pages(struct migrate_vma *migrate)
|
||||
}
|
||||
|
||||
if (!page) {
|
||||
/*
|
||||
* The only time there is no vma is when called from
|
||||
* migrate_device_coherent_page(). However this isn't
|
||||
* called if the page could not be unmapped.
|
||||
*/
|
||||
VM_BUG_ON(!migrate->vma);
|
||||
if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
|
||||
continue;
|
||||
if (!notified) {
|
||||
@@ -701,10 +710,11 @@ void migrate_vma_pages(struct migrate_vma *migrate)
|
||||
|
||||
mapping = page_mapping(page);
|
||||
|
||||
if (is_device_private_page(newpage)) {
|
||||
if (is_device_private_page(newpage) ||
|
||||
is_device_coherent_page(newpage)) {
|
||||
/*
|
||||
* For now only support private anonymous when migrating
|
||||
* to un-addressable device memory.
|
||||
* For now only support anonymous memory migrating to
|
||||
* device private or coherent memory.
|
||||
*/
|
||||
if (mapping) {
|
||||
migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
|
||||
@@ -791,3 +801,49 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(migrate_vma_finalize);
|
||||
|
||||
/*
|
||||
* Migrate a device coherent page back to normal memory. The caller should have
|
||||
* a reference on page which will be copied to the new page if migration is
|
||||
* successful or dropped on failure.
|
||||
*/
|
||||
int migrate_device_coherent_page(struct page *page)
|
||||
{
|
||||
unsigned long src_pfn, dst_pfn = 0;
|
||||
struct migrate_vma args;
|
||||
struct page *dpage;
|
||||
|
||||
WARN_ON_ONCE(PageCompound(page));
|
||||
|
||||
lock_page(page);
|
||||
src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
|
||||
args.src = &src_pfn;
|
||||
args.dst = &dst_pfn;
|
||||
args.cpages = 1;
|
||||
args.npages = 1;
|
||||
args.vma = NULL;
|
||||
|
||||
/*
|
||||
* We don't have a VMA and don't need to walk the page tables to find
|
||||
* the source page. So call migrate_vma_unmap() directly to unmap the
|
||||
* page as migrate_vma_setup() will fail if args.vma == NULL.
|
||||
*/
|
||||
migrate_vma_unmap(&args);
|
||||
if (!(src_pfn & MIGRATE_PFN_MIGRATE))
|
||||
return -EBUSY;
|
||||
|
||||
dpage = alloc_page(GFP_USER | __GFP_NOWARN);
|
||||
if (dpage) {
|
||||
lock_page(dpage);
|
||||
dst_pfn = migrate_pfn(page_to_pfn(dpage));
|
||||
}
|
||||
|
||||
migrate_vma_pages(&args);
|
||||
if (src_pfn & MIGRATE_PFN_MIGRATE)
|
||||
copy_highpage(dpage, page);
|
||||
migrate_vma_finalize(&args);
|
||||
|
||||
if (src_pfn & MIGRATE_PFN_MIGRATE)
|
||||
return 0;
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user