Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

memblock: move reserve_bootmem_region() to memblock.c and make it static

reserve_bootmem_region() is only called from
memmap_init_reserved_pages() and it was in mm/mm_init.c because of its
dependency on the static init_deferred_page().

Since init_deferred_page() is not static anymore, move
reserve_bootmem_region(), rename it to memmap_init_reserved_range() and
make it static.

Update the comment describing it to better reflect what the function
does and drop bogus comment about reserved pages in free_bootmem_page().

Update memblock test stubs to reflect the core changes.

Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Reviewed-by: David Hildenbrand (Arm) <david@kernel.org>
Link: https://patch.msgid.link/20260323072042.3651061-1-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

+37 -41
-4
include/linux/bootmem_info.h
··· 44 44 { 45 45 enum bootmem_type type = bootmem_type(page); 46 46 47 - /* 48 - * The reserve_bootmem_region sets the reserved flag on bootmem 49 - * pages. 50 - */ 51 47 VM_BUG_ON_PAGE(page_ref_count(page) != 2, page); 52 48 53 49 if (type == SECTION_INFO || type == MIX_SECTION_INFO)
-3
include/linux/mm.h
··· 3686 3686 3687 3687 extern void adjust_managed_page_count(struct page *page, long count); 3688 3688 3689 - extern void reserve_bootmem_region(phys_addr_t start, 3690 - phys_addr_t end, int nid); 3691 - 3692 3689 /* Free the reserved page into the buddy system, so it gets managed. */ 3693 3690 void free_reserved_page(struct page *page); 3694 3691
+28 -3
mm/memblock.c
··· 974 974 /* 975 975 * Initialize struct pages for free scratch memory. 976 976 * The struct pages for reserved scratch memory will be set up in 977 - * reserve_bootmem_region() 977 + * memmap_init_reserved_pages() 978 978 */ 979 979 __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, 980 980 MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) { ··· 2241 2241 return end_pfn - start_pfn; 2242 2242 } 2243 2243 2244 + /* 2245 + * Initialised pages do not have PageReserved set. This function is called 2246 + * for each reserved range and marks the pages PageReserved. 2247 + * When deferred initialization of struct pages is enabled it also ensures 2248 + * that struct pages are properly initialised. 2249 + */ 2250 + static void __init memmap_init_reserved_range(phys_addr_t start, 2251 + phys_addr_t end, int nid) 2252 + { 2253 + unsigned long pfn; 2254 + 2255 + for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) { 2256 + struct page *page = pfn_to_page(pfn); 2257 + 2258 + init_deferred_page(pfn, nid); 2259 + 2260 + /* 2261 + * no need for atomic set_bit because the struct 2262 + * page is not visible yet so nobody should 2263 + * access it yet. 2264 + */ 2265 + __SetPageReserved(page); 2266 + } 2267 + } 2268 + 2244 2269 static void __init memmap_init_reserved_pages(void) 2245 2270 { 2246 2271 struct memblock_region *region; ··· 2285 2260 end = start + region->size; 2286 2261 2287 2262 if (memblock_is_nomap(region)) 2288 - reserve_bootmem_region(start, end, nid); 2263 + memmap_init_reserved_range(start, end, nid); 2289 2264 2290 2265 memblock_set_node(start, region->size, &memblock.reserved, nid); 2291 2266 } ··· 2310 2285 if (!numa_valid_node(nid)) 2311 2286 nid = early_pfn_to_nid(PFN_DOWN(start)); 2312 2287 2313 - reserve_bootmem_region(start, end, nid); 2288 + memmap_init_reserved_range(start, end, nid); 2314 2289 } 2315 2290 } 2316 2291 }
-25
mm/mm_init.c
··· 772 772 __init_deferred_page(pfn, nid); 773 773 } 774 774 775 - /* 776 - * Initialised pages do not have PageReserved set. This function is 777 - * called for each range allocated by the bootmem allocator and 778 - * marks the pages PageReserved. The remaining valid pages are later 779 - * sent to the buddy page allocator. 780 - */ 781 - void __meminit reserve_bootmem_region(phys_addr_t start, 782 - phys_addr_t end, int nid) 783 - { 784 - unsigned long pfn; 785 - 786 - for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) { 787 - struct page *page = pfn_to_page(pfn); 788 - 789 - __init_deferred_page(pfn, nid); 790 - 791 - /* 792 - * no need for atomic set_bit because the struct 793 - * page is not visible yet so nobody should 794 - * access it yet. 795 - */ 796 - __SetPageReserved(page); 797 - } 798 - } 799 - 800 775 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ 801 776 static bool __meminit 802 777 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
-2
tools/include/linux/mm.h
··· 32 32 return (phys_addr_t)address; 33 33 } 34 34 35 - void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid); 36 - 37 35 static inline void totalram_pages_inc(void) 38 36 { 39 37 }
+9
tools/testing/memblock/internal.h
··· 29 29 return 0; 30 30 } 31 31 32 + #define for_each_valid_pfn(pfn, start_pfn, end_pfn) \ 33 + for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++) 34 + 35 + static inline void init_deferred_page(unsigned long pfn, int nid) 36 + { 37 + } 38 + 39 + #define __SetPageReserved(p) ((void)(p)) 40 + 32 41 #endif
-4
tools/testing/memblock/mmzone.c
··· 11 11 return NULL; 12 12 } 13 13 14 - void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid) 15 - { 16 - } 17 - 18 14 void atomic_long_set(atomic_long_t *v, long i) 19 15 { 20 16 }