mm/mincore: use a helper for checking the swap cache

Introduce a mincore_swap helper for checking swap entries.  Move all
swap-related logic and the sanity debug check into it, separating them
from the page cache checking.

Performance is better after this commit.  mincore_page is never called
on the swap cache address space now, so its logic can be simpler.  The
sanity check also covers more potential cases now: previously the
WARN_ON only caught a potentially corrupted page table; now a WARN is
also triggered if shmem contains a swap entry with !CONFIG_SWAP.  This
changes the mincore value when the WARN is triggered, but that
shouldn't matter: the WARN_ON means the data is already corrupted or
something is very wrong, so it really should not happen.

Before this series:
mincore on a swapped out 16G anon mmap range:
Took 488220 us
mincore on 16G shmem mmap range:
Took 530272 us

After this commit:
mincore on a swapped out 16G anon mmap range:
Took 446763 us
mincore on 16G shmem mmap range:
Took 460496 us

About 10% faster.
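
The measurement tool is not part of the patch; a minimal sketch of how the
anon case could be timed (assuming MADV_PAGEOUT is used to push the
populated pages out to swap, and that the machine has more than 16G of free
swap) might look like this:

/* Hypothetical reproduction of the anon-mmap measurement above. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
	size_t size = 16UL << 30;			/* 16G anon range */
	size_t pages = size / sysconf(_SC_PAGESIZE);
	unsigned char *vec = malloc(pages);		/* one residency byte per page */
	struct timeval t0, t1;
	char *buf;

	buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED || !vec) {
		perror("setup");
		return 1;
	}

	memset(buf, 1, size);				/* populate all pages */
	madvise(buf, size, MADV_PAGEOUT);		/* ask the kernel to reclaim (swap out) the range */

	gettimeofday(&t0, NULL);
	if (mincore(buf, size, vec))
		perror("mincore");
	gettimeofday(&t1, NULL);

	printf("Took %ld us\n",
	       (t1.tv_sec - t0.tv_sec) * 1000000L + (t1.tv_usec - t0.tv_usec));
	return 0;
}

The shmem case would be the same idea, with the buffer coming from a mapping
of a tmpfs file rather than MAP_ANONYMOUS.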

Link: https://lkml.kernel.org/r/20250811172018.48901-3-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -47,6 +47,48 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
 	return 0;
 }
 
+static unsigned char mincore_swap(swp_entry_t entry, bool shmem)
+{
+	struct swap_info_struct *si;
+	struct folio *folio = NULL;
+	unsigned char present = 0;
+
+	if (!IS_ENABLED(CONFIG_SWAP)) {
+		WARN_ON(1);
+		return 0;
+	}
+
+	/*
+	 * Shmem mapping may contain swapin error entries, which are
+	 * absent. Page table may contain migration or hwpoison
+	 * entries which are always uptodate.
+	 */
+	if (non_swap_entry(entry))
+		return !shmem;
+
+	/*
+	 * Shmem mapping lookup is lockless, so we need to grab the swap
+	 * device. mincore page table walk locks the PTL, and the swap
+	 * device is stable, avoid touching the si for better performance.
+	 */
+	if (shmem) {
+		si = get_swap_device(entry);
+		if (!si)
+			return 0;
+	}
+	folio = filemap_get_entry(swap_address_space(entry),
+				  swap_cache_index(entry));
+	if (shmem)
+		put_swap_device(si);
+	/* The swap cache space contains either folio, shadow or NULL */
+	if (folio && !xa_is_value(folio)) {
+		present = folio_test_uptodate(folio);
+		folio_put(folio);
+	}
+
+	return present;
+}
+
 /*
  * Later we can get more picky about what "in core" means precisely.
  * For now, simply check to see if the page is in the page cache,
@@ -64,33 +106,15 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
 	 * any other file mapping (ie. marked !present and faulted in with
 	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
 	 */
-	if (IS_ENABLED(CONFIG_SWAP) && shmem_mapping(mapping)) {
-		folio = filemap_get_entry(mapping, index);
-		/*
-		 * shmem/tmpfs may return swap: account for swapcache
-		 * page too.
-		 */
+	folio = filemap_get_entry(mapping, index);
+	if (folio) {
 		if (xa_is_value(folio)) {
-			struct swap_info_struct *si;
-			swp_entry_t swp = radix_to_swp_entry(folio);
-
-			/* There might be swapin error entries in shmem mapping. */
-			if (non_swap_entry(swp))
+			if (shmem_mapping(mapping))
+				return mincore_swap(radix_to_swp_entry(folio),
+						    true);
+			else
 				return 0;
-			/* Prevent swap device to being swapoff under us */
-			si = get_swap_device(swp);
-			if (si) {
-				folio = filemap_get_folio(swap_address_space(swp),
-							  swap_cache_index(swp));
-				put_swap_device(si);
-			} else {
-				return 0;
-			}
 		}
-	} else {
-		folio = filemap_get_folio(mapping, index);
-	}
-	if (!IS_ERR_OR_NULL(folio)) {
 		present = folio_test_uptodate(folio);
 		folio_put(folio);
 	}
@@ -168,23 +192,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			for (i = 0; i < step; i++)
 				vec[i] = 1;
 		} else { /* pte is a swap entry */
-			swp_entry_t entry = pte_to_swp_entry(pte);
-
-			if (non_swap_entry(entry)) {
-				/*
-				 * migration or hwpoison entries are always
-				 * uptodate
-				 */
-				*vec = 1;
-			} else {
-#ifdef CONFIG_SWAP
-				*vec = mincore_page(swap_address_space(entry),
-						    swap_cache_index(entry));
-#else
-				WARN_ON(1);
-				*vec = 1;
-#endif
-			}
+			*vec = mincore_swap(pte_to_swp_entry(pte), false);
 		}
 		vec += step;
 	}