mm: hugetlb: directly pass order when allocating a hugetlb folio

Pass the order directly instead of a struct hstate, removing the
huge_page_order() calls from all hugetlb folio allocation paths.  Also add
order_is_gigantic() to check whether an order is gigantic.

Link: https://lkml.kernel.org/r/20250910133958.301467-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 4a25f995bd (parent 4094d3434b)
Author: Kefeng Wang <wangkefeng.wang@huawei.com>
Date:   2025-09-10 21:39:56 +08:00
Committer: Andrew Morton <akpm@linux-foundation.org>

 4 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h

@@ -788,9 +788,14 @@ static inline unsigned huge_page_shift(struct hstate *h)
 	return h->order + PAGE_SHIFT;
 }
 
+static inline bool order_is_gigantic(unsigned int order)
+{
+	return order > MAX_PAGE_ORDER;
+}
+
 static inline bool hstate_is_gigantic(struct hstate *h)
 {
-	return huge_page_order(h) > MAX_PAGE_ORDER;
+	return order_is_gigantic(huge_page_order(h));
 }
 
 static inline unsigned int pages_per_huge_page(const struct hstate *h)

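To make the cutoff concrete, here is a small worked example of the new helper
(a standalone sketch, not kernel code; it assumes x86-64 with 4 KiB base
pages, where MAX_PAGE_ORDER is 10, a 2 MiB PMD hugepage is order 9, and a
1 GiB PUD hugepage is order 18):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PAGE_ORDER	10	/* buddy allocator limit, as on x86-64 */

/* Mirror of the new helper above. */
static bool order_is_gigantic(unsigned int order)
{
	return order > MAX_PAGE_ORDER;
}

int main(void)
{
	/* 2 MiB hugepage (order 9) fits within the buddy allocator. */
	printf("order 9:  %s\n", order_is_gigantic(9) ? "gigantic" : "buddy");
	/* 1 GiB hugepage (order 18) must take the gigantic path. */
	printf("order 18: %s\n", order_is_gigantic(18) ? "gigantic" : "buddy");
	return 0;
}

Orders at or below MAX_PAGE_ORDER go through alloc_buddy_hugetlb_folio();
anything larger must use alloc_gigantic_folio() (CMA or alloc_contig_range()).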
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c

@@ -1473,17 +1473,16 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
 #ifdef CONFIG_CONTIG_ALLOC
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask)
 {
 	struct folio *folio;
-	int order = huge_page_order(h);
 	bool retried = false;
 
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 
 retry:
-	folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
+	folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
 	if (!folio) {
 		if (hugetlb_cma_exclusive_alloc())
 			return NULL;
@@ -1506,16 +1505,16 @@ retry:
 }
 
 #else /* !CONFIG_CONTIG_ALLOC */
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
-		int nid, nodemask_t *nodemask)
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
+		nodemask_t *nodemask)
 {
 	return NULL;
 }
 #endif /* CONFIG_CONTIG_ALLOC */
 
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
-		int nid, nodemask_t *nodemask)
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
+		nodemask_t *nodemask)
 {
 	return NULL;
 }
@@ -1926,11 +1925,9 @@ struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
 	return NULL;
 }
 
-static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
-		gfp_t gfp_mask, int nid, nodemask_t *nmask,
-		nodemask_t *node_alloc_noretry)
+static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask,
+		int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
 {
-	int order = huge_page_order(h);
 	struct folio *folio;
 	bool alloc_try_hard = true;
 
@@ -1980,11 +1977,13 @@ static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
 		nodemask_t *node_alloc_noretry)
 {
 	struct folio *folio;
+	int order = huge_page_order(h);
 
-	if (hstate_is_gigantic(h))
-		folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
+	if (order_is_gigantic(order))
+		folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
 	else
-		folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry);
+		folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
+						  node_alloc_noretry);
 	if (folio)
 		init_new_hugetlb_folio(h, folio);
 	return folio;
@@ -2872,7 +2871,7 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
 	 * of bailing out right away without further retrying.
 	 */
-	if (folio_order(folio) > MAX_PAGE_ORDER)
+	if (order_is_gigantic(folio_order(folio)))
 		return -ENOMEM;
 
 	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))

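Taken together, the mm/hugetlb.c hunks leave the dispatch in
only_alloc_fresh_hugetlb_folio() looking roughly like this (a sketch assembled
from the hunks above; the parameters between h and node_alloc_noretry are
reconstructed from the call sites, not shown verbatim in this diff):

static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	struct folio *folio;
	int order = huge_page_order(h);	/* derived once, passed down */

	if (order_is_gigantic(order))
		folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
	else
		folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
						  node_alloc_noretry);
	if (folio)
		init_new_hugetlb_folio(h, folio);
	return folio;
}

The hstate is still needed here for init_new_hugetlb_folio(), but the
allocators below this point now only see the order.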
diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
--- a/mm/hugetlb_cma.c
+++ b/mm/hugetlb_cma.c

@@ -26,11 +26,10 @@ void hugetlb_cma_free_folio(struct folio *folio)
 	WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
 }
 
-struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask)
 {
 	int node;
-	int order = huge_page_order(h);
 	struct folio *folio = NULL;
 
 	if (hugetlb_cma[nid])

diff --git a/mm/hugetlb_cma.h b/mm/hugetlb_cma.h
--- a/mm/hugetlb_cma.h
+++ b/mm/hugetlb_cma.h

@@ -4,7 +4,7 @@
 
 #ifdef CONFIG_CMA
 void hugetlb_cma_free_folio(struct folio *folio);
-struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask);
 struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
 		bool node_exact);
@@ -18,8 +18,8 @@ static inline void hugetlb_cma_free_folio(struct folio *folio)
 {
 }
 
-static inline struct folio *hugetlb_cma_alloc_folio(struct hstate *h,
-		gfp_t gfp_mask, int nid, nodemask_t *nodemask)
+static inline struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
+		int nid, nodemask_t *nodemask)
 {
 	return NULL;
 }