mm: hugetlb: convert to account_new_hugetlb_folio()

Rename __prep_account_new_huge_page() to account_new_hugetlb_folio() and
derive the nid from the folio inside the helper.  This avoids callers passing
the wrong nid into the accounting, a mistake that has been made before.

Link: https://lkml.kernel.org/r/20250910133958.301467-3-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
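
For reference, the net effect extracted from the diff below: the per-node
counter update now derives the node from the folio itself, so callers can no
longer hand in a mismatched nid.

        /* Before: trusts whatever nid the caller passes in. */
        static void __prep_account_new_huge_page(struct hstate *h, int nid)
        {
                lockdep_assert_held(&hugetlb_lock);
                h->nr_huge_pages++;
                h->nr_huge_pages_node[nid]++;
        }

        /* After: the node is always taken from the folio being accounted. */
        static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
        {
                lockdep_assert_held(&hugetlb_lock);
                h->nr_huge_pages++;
                h->nr_huge_pages_node[folio_nid(folio)]++;
        }

Callers that previously did __prep_account_new_huge_page(h, folio_nid(folio)),
or passed a locally cached nid, now call account_new_hugetlb_folio(h, folio)
while holding hugetlb_lock.
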
@@ -1890,11 +1890,11 @@ void free_huge_folio(struct folio *folio)
 /*
  * Must be called with the hugetlb lock held
  */
-static void __prep_account_new_huge_page(struct hstate *h, int nid)
+static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 {
 	lockdep_assert_held(&hugetlb_lock);
 	h->nr_huge_pages++;
-	h->nr_huge_pages_node[nid]++;
+	h->nr_huge_pages_node[folio_nid(folio)]++;
 }
 
 static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
@@ -2020,7 +2020,7 @@ static void prep_and_add_allocated_folios(struct hstate *h,
 	/* Add all new pool pages to free lists in one lock cycle */
 	spin_lock_irqsave(&hugetlb_lock, flags);
 	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
-		__prep_account_new_huge_page(h, folio_nid(folio));
+		account_new_hugetlb_folio(h, folio);
 		enqueue_hugetlb_folio(h, folio);
 	}
 	spin_unlock_irqrestore(&hugetlb_lock, flags);
@@ -2232,7 +2232,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
 	 * as surplus_pages, otherwise it might confuse
 	 * persistent_huge_pages() momentarily.
 	 */
-	__prep_account_new_huge_page(h, folio_nid(folio));
+	account_new_hugetlb_folio(h, folio);
 
 	/*
 	 * We could have raced with the pool size change.
@@ -2270,7 +2270,7 @@ static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mas
 		return NULL;
 
 	spin_lock_irq(&hugetlb_lock);
-	__prep_account_new_huge_page(h, folio_nid(folio));
+	account_new_hugetlb_folio(h, folio);
 	spin_unlock_irq(&hugetlb_lock);
 
 	/* fresh huge pages are frozen */
@@ -2829,7 +2829,7 @@ retry:
 	/*
 	 * Ok, old_folio is still a genuine free hugepage. Remove it from
 	 * the freelist and decrease the counters. These will be
-	 * incremented again when calling __prep_account_new_huge_page()
+	 * incremented again when calling account_new_hugetlb_folio()
 	 * and enqueue_hugetlb_folio() for new_folio. The counters will
 	 * remain stable since this happens under the lock.
 	 */
@@ -2839,7 +2839,7 @@ retry:
 	 * Ref count on new_folio is already zero as it was dropped
 	 * earlier. It can be directly added to the pool free list.
 	 */
-	__prep_account_new_huge_page(h, nid);
+	account_new_hugetlb_folio(h, new_folio);
 	enqueue_hugetlb_folio(h, new_folio);
 
 	/*
@@ -3313,7 +3313,7 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
 		hugetlb_bootmem_init_migratetype(folio, h);
 		/* Subdivide locks to achieve better parallel performance */
 		spin_lock_irqsave(&hugetlb_lock, flags);
-		__prep_account_new_huge_page(h, folio_nid(folio));
+		account_new_hugetlb_folio(h, folio);
 		enqueue_hugetlb_folio(h, folio);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
 	}