Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

mm: page_alloc: add __split_page()

Factor out the splitting of a non-compound page from make_alloc_exact() and
split_page() into a new helper function __split_page().

While at it, convert the VM_BUG_ON_PAGE() into a VM_WARN_ON_PAGE().

Link: https://lkml.kernel.org/r/20260109093136.1491549-3-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Muchun Song <muchun.song@linux.dev>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
Cc: Mark Brown <broonie@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Kefeng Wang and committed by
Andrew Morton
a9deb800 01152bd2

+23 -8
+10
include/linux/mmdebug.h
··· 47 47 BUG(); \ 48 48 } \ 49 49 } while (0) 50 + #define VM_WARN_ON_PAGE(cond, page) ({ \ 51 + int __ret_warn = !!(cond); \ 52 + \ 53 + if (unlikely(__ret_warn)) { \ 54 + dump_page(page, "VM_WARN_ON_PAGE(" __stringify(cond)")");\ 55 + WARN_ON(1); \ 56 + } \ 57 + unlikely(__ret_warn); \ 58 + }) 50 59 #define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \ 51 60 static bool __section(".data..once") __warned; \ 52 61 int __ret_warn_once = !!(cond); \ ··· 131 122 #define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) 132 123 #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) 133 124 #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) 125 + #define VM_WARN_ON_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond) 134 126 #define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond) 135 127 #define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) 136 128 #define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
+13 -8
mm/page_alloc.c
··· 3107 3107 folio_batch_reinit(folios); 3108 3108 } 3109 3109 3110 + static void __split_page(struct page *page, unsigned int order) 3111 + { 3112 + VM_WARN_ON_PAGE(PageCompound(page), page); 3113 + 3114 + split_page_owner(page, order, 0); 3115 + pgalloc_tag_split(page_folio(page), order, 0); 3116 + split_page_memcg(page, order); 3117 + } 3118 + 3110 3119 /* 3111 3120 * split_page takes a non-compound higher-order page, and splits it into 3112 3121 * n (1<<order) sub-pages: page[0..n] ··· 3128 3119 { 3129 3120 int i; 3130 3121 3131 - VM_BUG_ON_PAGE(PageCompound(page), page); 3132 - VM_BUG_ON_PAGE(!page_count(page), page); 3122 + VM_WARN_ON_PAGE(!page_count(page), page); 3133 3123 3134 3124 for (i = 1; i < (1 << order); i++) 3135 3125 set_page_refcounted(page + i); 3136 - split_page_owner(page, order, 0); 3137 - pgalloc_tag_split(page_folio(page), order, 0); 3138 - split_page_memcg(page, order); 3126 + 3127 + __split_page(page, order); 3139 3128 } 3140 3129 EXPORT_SYMBOL_GPL(split_page); 3141 3130 ··· 5396 5389 struct page *page = virt_to_page((void *)addr); 5397 5390 struct page *last = page + nr; 5398 5391 5399 - split_page_owner(page, order, 0); 5400 - pgalloc_tag_split(page_folio(page), order, 0); 5401 - split_page_memcg(page, order); 5392 + __split_page(page, order); 5402 5393 while (page < --last) 5403 5394 set_page_refcounted(last); 5404 5395