mm/hugetlb: remove prepare_hugepage_range()

Only mips and loongarch implement this API, and all it does is check
that neither len nor addr makes the requested range run past the top of
the stack.  That check is already performed in each architecture's
arch_get_unmapped_area*() functions, even if those checks are not 100%
identical.

For example, on both architectures there is a trivial difference in how
the stack top is defined: the old code uses STACK_TOP, which may be
slightly smaller than TASK_SIZE on either of them, but that should not
be a problem.

This makes the whole API obsolete, so remove it completely.
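
For reference, a minimal userspace sketch of the bounds check in
question (range_fits() and DEMO_LIMIT are illustrative names made up
for this sketch, not kernel code; the limit merely stands in for
STACK_TOP/TASK_SIZE):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for STACK_TOP/TASK_SIZE; the real limit is per-arch. */
#define DEMO_LIMIT 0x0000800000000000UL

/*
 * Same shape as the removed check: the subtraction form avoids
 * computing addr + len, which could wrap around an unsigned long.
 */
static bool range_fits(unsigned long addr, unsigned long len)
{
        if (len > DEMO_LIMIT)
                return false;   /* request larger than the whole space */
        return DEMO_LIMIT - len >= addr;        /* [addr, addr + len) stays below the limit */
}

int main(void)
{
        printf("%d\n", range_fits(0x1000, 0x200000));                   /* 1: fits */
        printf("%d\n", range_fits(DEMO_LIMIT - 0x1000, 0x200000));      /* 0: runs past the top */
        return 0;
}

The checks that remain in the arch_get_unmapped_area*() paths have
roughly this shape, only bounded by TASK_SIZE instead of STACK_TOP.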

Link: https://lkml.kernel.org/r/20250627160707.2124580-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Jann Horn <jannh@google.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -10,20 +10,6 @@
 uint64_t pmd_to_entrylo(unsigned long pmd_val);
 
-#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
-					  unsigned long addr,
-					  unsigned long len)
-{
-	unsigned long task_size = STACK_TOP;
-
-	if (len > task_size)
-		return -ENOMEM;
-	if (task_size - len < addr)
-		return -EINVAL;
-	return 0;
-}
-
 #define __HAVE_ARCH_HUGE_PTE_CLEAR
 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 				  pte_t *ptep, unsigned long sz)

@@ -11,20 +11,6 @@
 #include <asm/page.h>
 
-#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
-					  unsigned long addr,
-					  unsigned long len)
-{
-	unsigned long task_size = STACK_TOP;
-
-	if (len > task_size)
-		return -ENOMEM;
-	if (task_size - len < addr)
-		return -EINVAL;
-	return 0;
-}
-
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr, pte_t *ptep,

@@ -179,12 +179,8 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (len & ~huge_page_mask(h))
 		return -EINVAL;
-	if (flags & MAP_FIXED) {
-		if (addr & ~huge_page_mask(h))
-			return -EINVAL;
-		if (prepare_hugepage_range(file, addr, len))
-			return -EINVAL;
-	}
+	if ((flags & MAP_FIXED) && (addr & ~huge_page_mask(h)))
+		return -EINVAL;
 
 	if (addr)
 		addr0 = ALIGN(addr, huge_page_size(h));

@@ -114,14 +114,6 @@ static inline int huge_pte_none_mostly(pte_t pte)
 }
 #endif
 
-#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
-		unsigned long addr, unsigned long len)
-{
-	return 0;
-}
-#endif
-
 #ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 		unsigned long addr, pte_t *ptep)

@@ -359,12 +359,6 @@ static inline void hugetlb_show_meminfo_node(int nid)
 {
 }
 
-static inline int prepare_hugepage_range(struct file *file,
-		unsigned long addr, unsigned long len)
-{
-	return -EINVAL;
-}
-
 static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
 {
 }