Merge tag 'mm-hotfixes-stable-2025-11-26-11-51' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "8 hotfixes.  4 are cc:stable, 7 are against mm/.

  All are singletons - please see the respective changelogs for details"

* tag 'mm-hotfixes-stable-2025-11-26-11-51' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/filemap: fix logic around SIGBUS in filemap_map_pages()
  mm/huge_memory: fix NULL pointer dereference when splitting folio
  MAINTAINERS: add test_kho to KHO's entry
  mailmap: add entry for Sam Protsenko
  selftests/mm: fix division-by-zero in uffd-unit-tests
  mm/mmap_lock: reset maple state on lock_vma_under_rcu() retry
  mm/memfd: fix information leak in hugetlb folios
  mm: swap: remove duplicate nr_swap_pages decrement in get_swap_page_of_type()
This commit is contained in:
Linus Torvalds
2025-11-26 12:38:05 -08:00
8 changed files with 63 additions and 36 deletions

View File

@@ -691,6 +691,8 @@ Sachin Mokashi <sachin.mokashi@intel.com> <sachinx.mokashi@intel.com>
Sachin P Sant <ssant@in.ibm.com>
Sai Prakash Ranjan <quic_saipraka@quicinc.com> <saiprakash.ranjan@codeaurora.org>
Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
Sam Protsenko <semen.protsenko@linaro.org>
Sam Protsenko <semen.protsenko@linaro.org> <semen.protsenko@globallogic.com>
Sam Ravnborg <sam@mars.ravnborg.org>
Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
Santosh Shilimkar <santosh.shilimkar@oracle.org>

View File

@@ -13799,6 +13799,7 @@ F: Documentation/admin-guide/mm/kho.rst
F: Documentation/core-api/kho/*
F: include/linux/kexec_handover.h
F: kernel/kexec_handover.c
F: lib/test_kho.c
F: tools/testing/selftests/kho/
KEYS-ENCRYPTED

View File

@@ -3682,8 +3682,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
struct folio *folio, unsigned long start,
unsigned long addr, unsigned int nr_pages,
unsigned long *rss, unsigned short *mmap_miss,
bool can_map_large)
pgoff_t file_end)
{
struct address_space *mapping = folio->mapping;
unsigned int ref_from_caller = 1;
vm_fault_t ret = 0;
struct page *page = folio_page(folio, start);
@@ -3692,12 +3693,16 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
unsigned long addr0;
/*
* Map the large folio fully where possible.
* Map the large folio fully where possible:
*
* The folio must not cross VMA or page table boundary.
* - The folio is fully within size of the file or belong
* to shmem/tmpfs;
* - The folio doesn't cross VMA boundary;
* - The folio doesn't cross page table boundary;
*/
addr0 = addr - start * PAGE_SIZE;
if (can_map_large && folio_within_vma(folio, vmf->vma) &&
if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
folio_within_vma(folio, vmf->vma) &&
(addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
vmf->pte -= start;
page -= start;
@@ -3812,7 +3817,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
unsigned long rss = 0;
unsigned int nr_pages = 0, folio_type;
unsigned short mmap_miss = 0, mmap_miss_saved;
bool can_map_large;
rcu_read_lock();
folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3823,16 +3827,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
end_pgoff = min(end_pgoff, file_end);
/*
* Do not allow to map with PTEs beyond i_size and with PMD
* across i_size to preserve SIGBUS semantics.
* Do not allow to map with PMD across i_size to preserve
* SIGBUS semantics.
*
* Make an exception for shmem/tmpfs that for long time
* intentionally mapped with PMDs across i_size.
*/
can_map_large = shmem_mapping(mapping) ||
file_end >= folio_next_index(folio);
if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) {
if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
filemap_map_pmd(vmf, folio, start_pgoff)) {
ret = VM_FAULT_NOPAGE;
goto out;
}
@@ -3861,8 +3863,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
else
ret |= filemap_map_folio_range(vmf, folio,
xas.xa_index - folio->index, addr,
nr_pages, &rss, &mmap_miss,
can_map_large);
nr_pages, &rss, &mmap_miss, file_end);
folio_unlock(folio);
} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);

View File

@@ -3619,6 +3619,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
if (folio != page_folio(split_at) || folio != page_folio(lock_at))
return -EINVAL;
/*
* Folios that just got truncated cannot get split. Signal to the
* caller that there was a race.
*
* TODO: this will also currently refuse shmem folios that are in the
* swapcache.
*/
if (!is_anon && !folio->mapping)
return -EBUSY;
if (new_order >= folio_order(folio))
return -EINVAL;
@@ -3659,18 +3669,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
gfp_t gfp;
mapping = folio->mapping;
/* Truncated ? */
/*
* TODO: add support for large shmem folio in swap cache.
* When shmem is in swap cache, mapping is NULL and
* folio_test_swapcache() is true.
*/
if (!mapping) {
ret = -EBUSY;
goto out;
}
min_order = mapping_min_folio_order(folio->mapping);
if (new_order < min_order) {
ret = -EINVAL;

View File

@@ -96,9 +96,36 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
NULL,
gfp_mask);
if (folio) {
u32 hash;
/*
* Zero the folio to prevent information leaks to userspace.
* Use folio_zero_user() which is optimized for huge/gigantic
* pages. Pass 0 as addr_hint since this is not a faulting path
* and we don't have a user virtual address yet.
*/
folio_zero_user(folio, 0);
/*
* Mark the folio uptodate before adding to page cache,
* as required by filemap.c and other hugetlb paths.
*/
__folio_mark_uptodate(folio);
/*
* Serialize hugepage allocation and instantiation to prevent
* races with concurrent allocations, as required by all other
* callers of hugetlb_add_to_page_cache().
*/
hash = hugetlb_fault_mutex_hash(memfd->f_mapping, idx);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
err = hugetlb_add_to_page_cache(folio,
memfd->f_mapping,
idx);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
if (err) {
folio_put(folio);
goto err_unresv;

View File

@@ -241,6 +241,7 @@ retry:
if (PTR_ERR(vma) == -EAGAIN) {
count_vm_vma_lock_event(VMA_LOCK_MISS);
/* The area was replaced with another one */
mas_set(&mas, address);
goto retry;
}

View File

@@ -2005,10 +2005,8 @@ swp_entry_t get_swap_page_of_type(int type)
local_lock(&percpu_swap_cluster.lock);
offset = cluster_alloc_swap_entry(si, 0, 1);
local_unlock(&percpu_swap_cluster.lock);
if (offset) {
if (offset)
entry = swp_entry(si->type, offset);
atomic_long_dec(&nr_swap_pages);
}
}
put_swap_device(si);
}

View File

@@ -1758,10 +1758,15 @@ int main(int argc, char *argv[])
uffd_test_ops = mem_type->mem_ops;
uffd_test_case_ops = test->test_case_ops;
if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB))
if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB)) {
gopts.page_size = default_huge_page_size();
else
if (gopts.page_size == 0) {
uffd_test_skip("huge page size is 0, feature missing?");
continue;
}
} else {
gopts.page_size = psize();
}
/* Ensure we have at least 2 pages */
gopts.nr_pages = MAX(UFFD_TEST_MEM_SIZE, gopts.page_size * 2)
@@ -1776,12 +1781,6 @@ int main(int argc, char *argv[])
continue;
uffd_test_start("%s on %s", test->name, mem_type->name);
if ((mem_type->mem_flag == MEM_HUGETLB ||
mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
(default_huge_page_size() == 0)) {
uffd_test_skip("huge page size is 0, feature missing?");
continue;
}
if (!uffd_feature_supported(test)) {
uffd_test_skip("feature missing");
continue;