mm: hugetlb: directly pass order when allocate a hugetlb folio
Use order instead of struct hstate to remove the huge_page_order() call from all hugetlb folio allocation paths; also add order_is_gigantic() to check whether an order is gigantic.

Link: https://lkml.kernel.org/r/20250910133958.301467-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 4a25f995bd
parent 4094d3434b
committed by Andrew Morton
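To make the new calling convention concrete, here is a minimal userspace C sketch (not kernel code): struct hstate_sketch, MAX_PAGE_ORDER_SKETCH and alloc_path() are simplified stand-ins invented for illustration, and the two allocator paths are reduced to printouts. It demonstrates the point of the patch: the caller derives the order once with huge_page_order() and passes a plain order down, so order_is_gigantic() can pick the gigantic vs. buddy path without consulting struct hstate again.

/* Minimal illustration only; constants and types are simplified stand-ins. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_PAGE_ORDER_SKETCH	10	/* stand-in for the kernel's MAX_PAGE_ORDER */

struct hstate_sketch {
	unsigned int order;	/* huge page order, e.g. 9 for 2 MiB, 18 for 1 GiB (4 KiB base pages) */
};

static unsigned int huge_page_order(const struct hstate_sketch *h)
{
	return h->order;
}

/* mirrors the new helper: an order above the buddy limit is "gigantic" */
static bool order_is_gigantic(unsigned int order)
{
	return order > MAX_PAGE_ORDER_SKETCH;
}

/* caller computes the order once and passes it down, as only_alloc_fresh_hugetlb_folio() now does */
static void alloc_path(const struct hstate_sketch *h)
{
	unsigned int order = huge_page_order(h);

	if (order_is_gigantic(order))
		printf("order %u: gigantic path (CMA / contiguous allocation)\n", order);
	else
		printf("order %u: buddy allocator path\n", order);
}

int main(void)
{
	struct hstate_sketch pmd_huge = { .order = 9 };		/* 2 MiB hugepage */
	struct hstate_sketch pud_huge = { .order = 18 };	/* 1 GiB hugepage */

	alloc_path(&pmd_huge);
	alloc_path(&pud_huge);
	return 0;
}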
include/linux/hugetlb.h

@@ -788,9 +788,14 @@ static inline unsigned huge_page_shift(struct hstate *h)
 	return h->order + PAGE_SHIFT;
 }
 
+static inline bool order_is_gigantic(unsigned int order)
+{
+	return order > MAX_PAGE_ORDER;
+}
+
 static inline bool hstate_is_gigantic(struct hstate *h)
 {
-	return huge_page_order(h) > MAX_PAGE_ORDER;
+	return order_is_gigantic(huge_page_order(h));
 }
 
 static inline unsigned int pages_per_huge_page(const struct hstate *h)
mm/hugetlb.c
@@ -1473,17 +1473,16 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 
 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
 #ifdef CONFIG_CONTIG_ALLOC
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask)
 {
 	struct folio *folio;
-	int order = huge_page_order(h);
 	bool retried = false;
 
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 retry:
-	folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
+	folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
 	if (!folio) {
 		if (hugetlb_cma_exclusive_alloc())
 			return NULL;
@@ -1506,16 +1505,16 @@ retry:
 }
 
 #else /* !CONFIG_CONTIG_ALLOC */
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
-		int nid, nodemask_t *nodemask)
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
+		nodemask_t *nodemask)
 {
 	return NULL;
 }
 #endif /* CONFIG_CONTIG_ALLOC */
 
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
-		int nid, nodemask_t *nodemask)
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
+		nodemask_t *nodemask)
 {
 	return NULL;
 }
@@ -1926,11 +1925,9 @@ struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
 	return NULL;
 }
 
-static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
-		gfp_t gfp_mask, int nid, nodemask_t *nmask,
-		nodemask_t *node_alloc_noretry)
+static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask,
+		int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
 {
-	int order = huge_page_order(h);
 	struct folio *folio;
 	bool alloc_try_hard = true;
 
@@ -1980,11 +1977,13 @@ static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
 		nodemask_t *node_alloc_noretry)
 {
 	struct folio *folio;
+	int order = huge_page_order(h);
 
-	if (hstate_is_gigantic(h))
-		folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
+	if (order_is_gigantic(order))
+		folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
 	else
-		folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry);
+		folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
+						  node_alloc_noretry);
 	if (folio)
 		init_new_hugetlb_folio(h, folio);
 	return folio;
@@ -2872,7 +2871,7 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
 	 * of bailing out right away without further retrying.
 	 */
-	if (folio_order(folio) > MAX_PAGE_ORDER)
+	if (order_is_gigantic(folio_order(folio)))
 		return -ENOMEM;
 
 	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
mm/hugetlb_cma.c

@@ -26,11 +26,10 @@ void hugetlb_cma_free_folio(struct folio *folio)
 }
 
 
-struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask)
 {
 	int node;
-	int order = huge_page_order(h);
 	struct folio *folio = NULL;
 
 	if (hugetlb_cma[nid])
mm/hugetlb_cma.h

@@ -4,7 +4,7 @@
 
 #ifdef CONFIG_CMA
 void hugetlb_cma_free_folio(struct folio *folio);
-struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask);
 struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
 		bool node_exact);
@@ -18,8 +18,8 @@ static inline void hugetlb_cma_free_folio(struct folio *folio)
 {
 }
 
-static inline struct folio *hugetlb_cma_alloc_folio(struct hstate *h,
-		gfp_t gfp_mask, int nid, nodemask_t *nodemask)
+static inline struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
+		int nid, nodemask_t *nodemask)
 {
 	return NULL;
 }
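A note on the last hunk: with CONFIG_CMA disabled, the inline stub keeps the same prototype as the real hugetlb_cma_alloc_folio(), so callers such as alloc_gigantic_folio() compile unchanged whether or not CMA is built in; the order-based signature therefore has to change in both branches of the #ifdef.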