Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm: hugetlb: directly pass order when allocating a hugetlb folio

Pass the order directly instead of a struct hstate, which removes the
huge_page_order() calls from all of the hugetlb folio allocation paths.
Also add order_is_gigantic() to check whether an order is gigantic.
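
Condensed from the hunks below, the calling convention changes roughly
like this (a sketch, not a complete function; the dispatch lives in the
hstate-aware caller):

	/* Before: each helper took the hstate and derived the order itself. */
	folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);

	/* After: the caller resolves the order once and passes it down. */
	int order = huge_page_order(h);

	if (order_is_gigantic(order))
		folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
	else
		folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
				node_alloc_noretry);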

Link: https://lkml.kernel.org/r/20250910133958.301467-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Kefeng Wang and committed by Andrew Morton
4a25f995 4094d343

4 files changed, +24 -21
include/linux/hugetlb.h (+6 -1)

···
 	return h->order + PAGE_SHIFT;
 }
 
+static inline bool order_is_gigantic(unsigned int order)
+{
+	return order > MAX_PAGE_ORDER;
+}
+
 static inline bool hstate_is_gigantic(struct hstate *h)
 {
-	return huge_page_order(h) > MAX_PAGE_ORDER;
+	return order_is_gigantic(huge_page_order(h));
 }
 
 static inline unsigned int pages_per_huge_page(const struct hstate *h)
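
An order counts as gigantic when it exceeds MAX_PAGE_ORDER, i.e. when the
buddy allocator cannot serve it in a single allocation. A standalone sketch
of the arithmetic, assuming 4 KiB base pages and the default MAX_PAGE_ORDER
of 10 (both values are config and arch dependent):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB base pages (assumed) */
#define MAX_PAGE_ORDER	10	/* default buddy allocator limit (assumed) */

static bool order_is_gigantic(unsigned int order)
{
	return order > MAX_PAGE_ORDER;
}

int main(void)
{
	unsigned int pmd_order = 21 - PAGE_SHIFT;	/* 2 MiB folio: order 9 */
	unsigned int pud_order = 30 - PAGE_SHIFT;	/* 1 GiB folio: order 18 */

	/* Order 9 is buddy-allocatable; order 18 is gigantic. */
	printf("order %u gigantic? %d\n", pmd_order, order_is_gigantic(pmd_order));
	printf("order %u gigantic? %d\n", pud_order, order_is_gigantic(pud_order));
	return 0;
}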
mm/hugetlb.c (+14 -15)

···
 
 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
 #ifdef CONFIG_CONTIG_ALLOC
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask)
 {
 	struct folio *folio;
-	int order = huge_page_order(h);
 	bool retried = false;
 
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 retry:
-	folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
+	folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
 	if (!folio) {
 		if (hugetlb_cma_exclusive_alloc())
 			return NULL;
···
 }
 
 #else /* !CONFIG_CONTIG_ALLOC */
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
-		int nid, nodemask_t *nodemask)
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
+		nodemask_t *nodemask)
 {
 	return NULL;
 }
 #endif /* CONFIG_CONTIG_ALLOC */
 
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
-static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
-		int nid, nodemask_t *nodemask)
+static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
+		nodemask_t *nodemask)
 {
 	return NULL;
 }
···
 	return NULL;
 }
 
-static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
-		gfp_t gfp_mask, int nid, nodemask_t *nmask,
-		nodemask_t *node_alloc_noretry)
+static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask,
+		int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
 {
-	int order = huge_page_order(h);
 	struct folio *folio;
 	bool alloc_try_hard = true;
 
···
 		nodemask_t *node_alloc_noretry)
 {
 	struct folio *folio;
+	int order = huge_page_order(h);
 
-	if (hstate_is_gigantic(h))
-		folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
+	if (order_is_gigantic(order))
+		folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
 	else
-		folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry);
+		folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
+				node_alloc_noretry);
 	if (folio)
 		init_new_hugetlb_folio(h, folio);
 	return folio;
···
 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
 	 * of bailing out right away without further retrying.
 	 */
-	if (folio_order(folio) > MAX_PAGE_ORDER)
+	if (order_is_gigantic(folio_order(folio)))
 		return -ENOMEM;
 
 	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
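
For reference, the accessor being hoisted out of the helpers is a trivial
field read, approximately as defined elsewhere in include/linux/hugetlb.h
(quoted for context, not part of this diff):

static inline unsigned int huge_page_order(const struct hstate *h)
{
	return h->order;
}

Passing the resolved order down avoids rereading h->order in every helper,
and the final hunk shows the payoff: the gigantic check can now be applied
to a bare folio_order() value where no hstate is at hand.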
mm/hugetlb_cma.c (+1 -2)

···
 }
 
 
-struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask)
 {
 	int node;
-	int order = huge_page_order(h);
 	struct folio *folio = NULL;
 
 	if (hugetlb_cma[nid])
mm/hugetlb_cma.h (+3 -3)

···
 
 #ifdef CONFIG_CMA
 void hugetlb_cma_free_folio(struct folio *folio);
-struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask);
 struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
 		bool node_exact);
···
 {
 }
 
-static inline struct folio *hugetlb_cma_alloc_folio(struct hstate *h,
-		gfp_t gfp_mask, int nid, nodemask_t *nodemask)
+static inline struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
+		int nid, nodemask_t *nodemask)
 {
 	return NULL;
 }
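
The !CONFIG_CMA stub returning NULL keeps call sites free of preprocessor
guards: callers always try the CMA path first and fall back when nothing
comes back, as the alloc_gigantic_folio() hunk in mm/hugetlb.c shows. A
minimal sketch of that pattern under the new signature:

	folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
	if (!folio) {
		/* NULL from the stub (CMA off) or from a failed CMA attempt;
		 * give up only if hugetlb CMA is the sole permitted source. */
		if (hugetlb_cma_exclusive_alloc())
			return NULL;
	}

Since the stub is a static inline that always returns NULL, the compiler
folds the branch away entirely when CONFIG_CMA is disabled.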