mirror of
https://github.com/torvalds/linux.git
synced 2025-12-01 07:26:02 +07:00
mm, swap: check page poison flag after locking it
Instead of checking the poison flag only in the fast swap cache lookup path, always check the poison flags after locking a swap cache folio. There are two reasons to do so. The folio is unstable and could be removed from the swap cache anytime, so it's totally possible that the folio is no longer the backing folio of a swap entry, and could be an irrelevant poisoned folio. We might mistakenly kill a faulting process. And it's totally possible or even common for the slow swap in path (swapin_readahead) to bring in a cached folio. The cache folio could be poisoned, too. Only checking the poison flag in the fast path will miss such folios. The race window is tiny, so it's very unlikely to happen, though. While at it, also add an unlikely prefix. Link: https://lkml.kernel.org/r/20250916160100.31545-5-ryncsn@gmail.com Signed-off-by: Kairui Song <kasong@tencent.com> Acked-by: Chris Li <chrisl@kernel.org> Acked-by: David Hildenbrand <david@redhat.com> Acked-by: Nhat Pham <nphamcs@gmail.com> Suggested-by: Chris Li <chrisl@kernel.org> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Baoquan He <bhe@redhat.com> Cc: Barry Song <baohua@kernel.org> Cc: "Huang, Ying" <ying.huang@linux.alibaba.com> Cc: Hugh Dickins <hughd@google.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Kemeng Shi <shikemeng@huaweicloud.com> Cc: kernel test robot <oliver.sang@intel.com> Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Yosry Ahmed <yosryahmed@google.com> Cc: Zi Yan <ziy@nvidia.com> Cc: SeongJae Park <sj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
a733d8de7f
commit
3518b931df
22
mm/memory.c
22
mm/memory.c
@@ -4661,10 +4661,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
|
||||
goto out;
|
||||
|
||||
folio = swap_cache_get_folio(entry);
|
||||
if (folio) {
|
||||
if (folio)
|
||||
swap_update_readahead(folio, vma, vmf->address);
|
||||
page = folio_file_page(folio, swp_offset(entry));
|
||||
}
|
||||
swapcache = folio;
|
||||
|
||||
if (!folio) {
|
||||
@@ -4735,20 +4733,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
|
||||
ret = VM_FAULT_MAJOR;
|
||||
count_vm_event(PGMAJFAULT);
|
||||
count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
|
||||
page = folio_file_page(folio, swp_offset(entry));
|
||||
} else if (PageHWPoison(page)) {
|
||||
/*
|
||||
* hwpoisoned dirty swapcache pages are kept for killing
|
||||
* owner processes (which may be unknown at hwpoison time)
|
||||
*/
|
||||
ret = VM_FAULT_HWPOISON;
|
||||
goto out_release;
|
||||
}
|
||||
|
||||
ret |= folio_lock_or_retry(folio, vmf);
|
||||
if (ret & VM_FAULT_RETRY)
|
||||
goto out_release;
|
||||
|
||||
page = folio_file_page(folio, swp_offset(entry));
|
||||
if (swapcache) {
|
||||
/*
|
||||
* Make sure folio_free_swap() or swapoff did not release the
|
||||
@@ -4761,6 +4752,15 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
|
||||
page_swap_entry(page).val != entry.val))
|
||||
goto out_page;
|
||||
|
||||
if (unlikely(PageHWPoison(page))) {
|
||||
/*
|
||||
* hwpoisoned dirty swapcache pages are kept for killing
|
||||
* owner processes (which may be unknown at hwpoison time)
|
||||
*/
|
||||
ret = VM_FAULT_HWPOISON;
|
||||
goto out_page;
|
||||
}
|
||||
|
||||
/*
|
||||
* KSM sometimes has to copy on read faults, for example, if
|
||||
* folio->index of non-ksm folios would be nonlinear inside the
|
||||
|
||||
Reference in New Issue
Block a user