Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'memblock-v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock

Pull memblock updates from Mike Rapoport:

- Improve debuggability of reserve_mem kernel parameter handling:
  print errors when parsing or allocation fails, and expose what was
  actually reserved via debugfs (a lookup sketch follows the
  mm/memblock.c diff below)

- Make memblock_free_late() and free_reserved_area() use the same core
  logic for freeing memory to the buddy allocator, and ensure it takes
  care of updating the memblock arrays when ARCH_KEEP_MEMBLOCK is
  enabled (a usage sketch follows the shortlog below)

* tag 'memblock-v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
x86/alternative: delay freeing of smp_locks section
memblock: warn when freeing reserved memory before memory map is initialized
memblock, treewide: make memblock_free() handle late freeing
memblock: make free_reserved_area() update memblock if ARCH_KEEP_MEMBLOCK=y
memblock: extract page freeing from free_reserved_area() into a helper
memblock: make free_reserved_area() more robust
mm: move free_reserved_area() to mm/memblock.c
powerpc: opal-core: pair alloc_pages_exact() with free_pages_exact()
powerpc: fadump: pair alloc_pages_exact() with free_pages_exact()
memblock: reserve_mem: fix end calculation in reserve_mem_release_by_name()
memblock: move reserve_bootmem_range() to memblock.c and make it static
memblock: Add reserve_mem debugfs info
memblock: Print out errors on reserve_mem parser
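
With this merge, memblock_free_late() is gone and the regular free routines do
the right thing in both boot phases, which is why the per-file changes below
mostly collapse onto memblock_free() or memblock_phys_free(). A minimal sketch
of the resulting convention, assuming a hypothetical boot-time buffer
(boot_buf, its size, and the two helpers are invented for illustration;
memblock_alloc() and memblock_free() are the APIs exercised by this series):

#include <linux/cache.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

static void *boot_buf;			/* hypothetical example buffer */
static size_t boot_buf_size = SZ_64K;

static void __init boot_buf_alloc(void)
{
	boot_buf = memblock_alloc(boot_buf_size, SMP_CACHE_BYTES);
}

/*
 * Previously, freeing after buddy was up required the special-purpose
 * memblock_free_late(__pa(boot_buf), boot_buf_size). After this series
 * memblock_free() itself releases the pages to the buddy allocator once
 * slab_is_available(), so the same call works in both boot phases.
 */
static int __init boot_buf_free_late(void)
{
	memblock_free(boot_buf, boot_buf_size);
	boot_buf = NULL;
	return 0;
}
late_initcall(boot_buf_free_late);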

25 files changed, 272 insertions(+), 199 deletions(-)
-3
arch/arm64/mm/init.c
···
 	WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE));
 	WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE));
 
-	/* Delete __init region from memblock.reserved. */
-	memblock_free(lm_init_begin, lm_init_end - lm_init_begin);
-
 	free_reserved_area(lm_init_begin, lm_init_end,
 			   POISON_FREE_INITMEM, "unused kernel");
 	/*
+2 -14
arch/powerpc/kernel/fadump.c
···
 
 static void *__init fadump_alloc_buffer(unsigned long size)
 {
-	unsigned long count, i;
-	struct page *page;
-	void *vaddr;
-
-	vaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
-	if (!vaddr)
-		return NULL;
-
-	count = PAGE_ALIGN(size) / PAGE_SIZE;
-	page = virt_to_page(vaddr);
-	for (i = 0; i < count; i++)
-		mark_page_reserved(page + i);
-	return vaddr;
+	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
 }
 
 static void fadump_free_buffer(unsigned long vaddr, unsigned long size)
 {
-	free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL);
+	free_pages_exact((void *)vaddr, size);
 }
 
 s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus)
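
The fadump change above is an instance of the general pairing rule this series
restores: memory obtained from alloc_pages_exact() is ordinary refcounted
memory, so it should be returned with free_pages_exact() rather than
hand-marked reserved and later funneled through free_reserved_area(). A
minimal sketch of the pattern, assuming a hypothetical buffer (notes_buf and
the helpers are invented; the two page-allocator calls are the real paired
APIs):

#include <linux/errno.h>
#include <linux/gfp.h>

static void *notes_buf;			/* hypothetical example buffer */

static int notes_buf_init(size_t size)
{
	notes_buf = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	return notes_buf ? 0 : -ENOMEM;
}

static void notes_buf_exit(size_t size)
{
	/* free_pages_exact() pairs with alloc_pages_exact() */
	free_pages_exact(notes_buf, size);
	notes_buf = NULL;
}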
+1 -10
arch/powerpc/platforms/powernv/opal-core.c
···
 	struct device_node *dn;
 	struct opalcore *new;
 	loff_t opalcore_off;
-	struct page *page;
 	Elf64_Phdr *phdr;
 	Elf64_Ehdr *elf;
 	int i, ret;
···
 		oc_conf->opalcorebuf_sz = 0;
 		return -ENOMEM;
 	}
-	count = oc_conf->opalcorebuf_sz / PAGE_SIZE;
-	page = virt_to_page(oc_conf->opalcorebuf);
-	for (i = 0; i < count; i++)
-		mark_page_reserved(page + i);
-
 	pr_debug("opalcorebuf = 0x%llx\n", (u64)oc_conf->opalcorebuf);
 
 	/* Read OPAL related device-tree entries */
···
 
 	/* free the buffer used for setting up OPAL core */
 	if (oc_conf->opalcorebuf) {
-		void *end = (void *)((u64)oc_conf->opalcorebuf +
-				     oc_conf->opalcorebuf_sz);
-
-		free_reserved_area(oc_conf->opalcorebuf, end, -1, NULL);
+		free_pages_exact(oc_conf->opalcorebuf, oc_conf->opalcorebuf_sz);
 		oc_conf->opalcorebuf = NULL;
 		oc_conf->opalcorebuf_sz = 0;
 	}
+1 -3
arch/sparc/kernel/mdesc.c
···
 static void __init mdesc_memblock_free(struct mdesc_handle *hp)
 {
 	unsigned int alloc_size;
-	unsigned long start;
 
 	BUG_ON(refcount_read(&hp->refcnt) != 0);
 	BUG_ON(!list_empty(&hp->list));
 
 	alloc_size = PAGE_ALIGN(hp->handle_size);
-	start = __pa(hp);
-	memblock_free_late(start, alloc_size);
+	memblock_free(hp, alloc_size);
 }
 
 static struct mdesc_mem_ops memblock_mdesc_ops = {
+18 -6
arch/x86/kernel/alternative.c
···
 					    __smp_locks, __smp_locks_end,
 					    _text, _etext);
 	}
-
-	if (!uniproc_patched || num_possible_cpus() == 1) {
-		free_init_pages("SMP alternatives",
-				(unsigned long)__smp_locks,
-				(unsigned long)__smp_locks_end);
-	}
 #endif
 
 	restart_nmi();
···
 
 	alt_reloc_selftest();
 }
+
+#ifdef CONFIG_SMP
+/*
+ * With CONFIG_DEFERRED_STRUCT_PAGE_INIT enabled we can free_init_pages() only
+ * after the deferred initialization of the memory map is complete.
+ */
+static int __init free_smp_locks(void)
+{
+	if (!uniproc_patched || num_possible_cpus() == 1) {
+		free_init_pages("SMP alternatives",
+				(unsigned long)__smp_locks,
+				(unsigned long)__smp_locks_end);
+	}
+
+	return 0;
+}
+arch_initcall(free_smp_locks);
+#endif
 
 /**
  * text_poke_early - Update instructions on a live kernel at boot time
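
The x86 hunk above applies the same constraint the memblock changes introduce:
reserved pages must not be handed to the buddy allocator while deferred
struct-page initialization may still be running, so the free is postponed to
an arch_initcall(), which runs after the memory map is complete. The pattern
in isolation, under stated assumptions (__example_begin/__example_end and the
function are invented; free_init_pages() is the existing x86 helper seen in
the hunk):

#include <linux/init.h>

/* x86 helper that poisons and frees an init-only range */
extern void free_init_pages(const char *what, unsigned long begin,
			    unsigned long end);

extern char __example_begin[], __example_end[];	/* hypothetical markers */

/* Defer freeing a boot-only region until initcall time. */
static int __init free_example_region(void)
{
	free_init_pages("example region",
			(unsigned long)__example_begin,
			(unsigned long)__example_end);
	return 0;
}
arch_initcall(free_example_region);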
+1 -1
arch/x86/kernel/setup.c
···
 	if (!ima_kexec_buffer_size)
 		return -ENOENT;
 
-	memblock_free_late(ima_kexec_buffer_phys,
+	memblock_phys_free(ima_kexec_buffer_phys,
 			   ima_kexec_buffer_size);
 
 	ima_kexec_buffer_phys = 0;
+1 -4
arch/x86/platform/efi/memmap.c
···
 void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
 {
 	if (flags & EFI_MEMMAP_MEMBLOCK) {
-		if (slab_is_available())
-			memblock_free_late(phys, size);
-		else
-			memblock_phys_free(phys, size);
+		memblock_phys_free(phys, size);
 	} else if (flags & EFI_MEMMAP_SLAB) {
 		struct page *p = pfn_to_page(PHYS_PFN(phys));
 		unsigned int order = get_order(size);
+1 -1
arch/x86/platform/efi/quirks.c
···
 			 * doesn't make sense as far as the firmware is
 			 * concerned, but it does provide us with a way to tag
 			 * those regions that must not be paired with
-			 * memblock_free_late().
+			 * memblock_phys_free().
 			 */
 			md->attribute |= EFI_MEMORY_RUNTIME;
 		}
+1 -1
drivers/firmware/efi/apple-properties.c
···
 	 */
 	data->len = 0;
 	memunmap(data);
-	memblock_free_late(pa_data + sizeof(*data), data_len);
+	memblock_phys_free(pa_data + sizeof(*data), data_len);
 
 	return ret;
 }
+1 -1
drivers/of/kexec.c
···
 	if (ret)
 		return ret;
 
-	memblock_free_late(addr, size);
+	memblock_phys_free(addr, size);
 	return 0;
 }
 #endif
-4
include/linux/bootmem_info.h
···
 {
 	enum bootmem_type type = bootmem_type(page);
 
-	/*
-	 * The reserve_bootmem_region sets the reserved flag on bootmem
-	 * pages.
-	 */
 	VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
 
 	if (type == SECTION_INFO || type == MIX_SECTION_INFO)
-2
include/linux/memblock.h
···
 			  struct memblock_type *type_b, phys_addr_t *out_start,
 			  phys_addr_t *out_end, int *out_nid);
 
-void memblock_free_late(phys_addr_t base, phys_addr_t size);
-
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
 static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
 					phys_addr_t *out_start,
-3
include/linux/mm.h
···
 
 extern void adjust_managed_page_count(struct page *page, long count);
 
-extern void reserve_bootmem_region(phys_addr_t start,
-				   phys_addr_t end, int nid);
-
 /* Free the reserved page into the buddy system, so it gets managed. */
 void free_reserved_page(struct page *page);
 
-7
init/initramfs.c
···
 
 void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
-	unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
-	unsigned long aligned_end = ALIGN(end, PAGE_SIZE);
-
-	memblock_free((void *)aligned_start, aligned_end - aligned_start);
-#endif
-
 	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
 			   "initrd");
 }
+3 -3
kernel/dma/swiotlb.c
···
 		free_pages(tbl_vaddr, get_order(tbl_size));
 		free_pages((unsigned long)mem->slots, get_order(slots_size));
 	} else {
-		memblock_free_late(__pa(mem->areas),
+		memblock_free(mem->areas,
 			array_size(sizeof(*mem->areas), mem->nareas));
-		memblock_free_late(mem->start, tbl_size);
-		memblock_free_late(__pa(mem->slots), slots_size);
+		memblock_phys_free(mem->start, tbl_size);
+		memblock_free(mem->slots, slots_size);
 	}
 
 	memset(mem, 0, sizeof(*mem));
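
The swiotlb hunk above shows the call-site split that replaces
memblock_free_late(): memblock_free() takes the kernel virtual pointer the
allocation returned, while memblock_phys_free() takes a physical address, so
the __pa() round-trips disappear. A hedged sketch of the two forms (the
function and its parameters are placeholders):

#include <linux/memblock.h>

static void release_boot_memory(void *vaddr, phys_addr_t paddr, size_t size)
{
	/* allocation held as a virtual pointer: free by pointer */
	memblock_free(vaddr, size);

	/* allocation tracked by physical address: free by address */
	memblock_phys_free(paddr, size);
}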
+1 -1
lib/bootconfig.c
···
 	if (early)
 		memblock_free(addr, size);
 	else if (addr)
-		memblock_free_late(__pa(addr), size);
+		memblock_free(addr, size);
 }
 
 #else /* !__KERNEL__ */
+10
mm/internal.h
···
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 DECLARE_STATIC_KEY_TRUE(deferred_pages);
 
+static inline bool deferred_pages_enabled(void)
+{
+	return static_branch_unlikely(&deferred_pages);
+}
+
 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
+#else
+static inline bool deferred_pages_enabled(void)
+{
+	return false;
+}
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 void init_deferred_page(unsigned long pfn, int nid);
+2 -2
mm/kfence/core.c
···
 	 * fails for the first page, and therefore expect addr==__kfence_pool in
 	 * most failure cases.
 	 */
-	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
+	memblock_free((void *)addr, KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
 	__kfence_pool = NULL;
 
-	memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
+	memblock_free(kfence_metadata_init, KFENCE_METADATA_SIZE);
 	kfence_metadata_init = NULL;
 
 	return false;
+178 -53
mm/memblock.c
···
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
 #include <linux/mutex.h>
+#include <linux/string_helpers.h>
 
 #ifdef CONFIG_KEXEC_HANDOVER
 #include <linux/libfdt.h>
···
  */
 void __init memblock_discard(void)
 {
-	phys_addr_t addr, size;
+	phys_addr_t size;
+	void *addr;
 
 	if (memblock.reserved.regions != memblock_reserved_init_regions) {
-		addr = __pa(memblock.reserved.regions);
+		addr = memblock.reserved.regions;
 		size = PAGE_ALIGN(sizeof(struct memblock_region) *
 				  memblock.reserved.max);
 		if (memblock_reserved_in_slab)
-			kfree(memblock.reserved.regions);
+			kfree(addr);
 		else
-			memblock_free_late(addr, size);
+			memblock_free(addr, size);
 	}
 
 	if (memblock.memory.regions != memblock_memory_init_regions) {
-		addr = __pa(memblock.memory.regions);
+		addr = memblock.memory.regions;
 		size = PAGE_ALIGN(sizeof(struct memblock_region) *
 				  memblock.memory.max);
 		if (memblock_memory_in_slab)
-			kfree(memblock.memory.regions);
+			kfree(addr);
 		else
-			memblock_free_late(addr, size);
+			memblock_free(addr, size);
 	}
 
 	memblock_memory = NULL;
···
 	return memblock_remove_range(&memblock.memory, base, size);
 }
 
+static unsigned long __free_reserved_area(phys_addr_t start, phys_addr_t end,
+					  int poison)
+{
+	unsigned long pages = 0, pfn;
+
+	if (deferred_pages_enabled()) {
+		WARN(1, "Cannot free reserved memory because of deferred initialization of the memory map");
+		return 0;
+	}
+
+	for_each_valid_pfn(pfn, PFN_UP(start), PFN_DOWN(end)) {
+		struct page *page = pfn_to_page(pfn);
+		void *direct_map_addr;
+
+		/*
+		 * 'direct_map_addr' might be different from the kernel virtual
+		 * address because some architectures use aliases.
+		 * Going via physical address, pfn_to_page() and page_address()
+		 * ensures that we get a _writeable_ alias for the memset().
+		 */
+		direct_map_addr = page_address(page);
+		/*
+		 * Perform a kasan-unchecked memset() since this memory
+		 * has not been initialized.
+		 */
+		direct_map_addr = kasan_reset_tag(direct_map_addr);
+		if ((unsigned int)poison <= 0xFF)
+			memset(direct_map_addr, poison, PAGE_SIZE);
+
+		free_reserved_page(page);
+		pages++;
+	}
+	return pages;
+}
+
+unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
+{
+	phys_addr_t start_pa, end_pa;
+	unsigned long pages;
+
+	/*
+	 * end is the first address past the region and it may be beyond what
+	 * __pa() or __pa_symbol() can handle.
+	 * Use the address included in the range for the conversion and add
+	 * back 1 afterwards.
+	 */
+	if (__is_kernel((unsigned long)start)) {
+		start_pa = __pa_symbol(start);
+		end_pa = __pa_symbol(end - 1) + 1;
+	} else {
+		start_pa = __pa(start);
+		end_pa = __pa(end - 1) + 1;
+	}
+
+	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
+		if (start_pa < end_pa)
+			memblock_remove_range(&memblock.reserved,
+					      start_pa, end_pa - start_pa);
+	}
+
+	pages = __free_reserved_area(start_pa, end_pa, poison);
+	if (pages && s)
+		pr_info("Freeing %s memory: %ldK\n", s, K(pages));
+
+	return pages;
+}
+
 /**
  * memblock_free - free boot memory allocation
  * @ptr: starting address of the boot memory allocation
  * @size: size of the boot memory block in bytes
  *
  * Free boot memory block previously allocated by memblock_alloc_xx() API.
- * The freeing memory will not be released to the buddy allocator.
+ * If called after the buddy allocator is available, the memory is released to
+ * the buddy allocator.
  */
 void __init_memblock memblock_free(void *ptr, size_t size)
 {
···
  * @size: size of the boot memory block in bytes
  *
  * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
- * The freeing memory will not be released to the buddy allocator.
+ * If called after the buddy allocator is available, the memory is released to
+ * the buddy allocator.
  */
 int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
 {
 	phys_addr_t end = base + size - 1;
+	int ret;
 
 	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
 		     &base, &end, (void *)_RET_IP_);
 
 	kmemleak_free_part_phys(base, size);
-	return memblock_remove_range(&memblock.reserved, base, size);
+	ret = memblock_remove_range(&memblock.reserved, base, size);
+
+	if (slab_is_available())
+		__free_reserved_area(base, base + size, -1);
+
+	return ret;
 }
 
 int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size,
···
 	/*
 	 * Initialize struct pages for free scratch memory.
 	 * The struct pages for reserved scratch memory will be set up in
-	 * reserve_bootmem_region()
+	 * memmap_init_reserved_pages()
 	 */
 	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
 			     MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
···
 	return addr;
 }
 
-/**
- * memblock_free_late - free pages directly to buddy allocator
- * @base: phys starting address of the boot memory block
- * @size: size of the boot memory block in bytes
- *
- * This is only useful when the memblock allocator has already been torn
- * down, but we are still initializing the system. Pages are released directly
- * to the buddy allocator.
- */
-void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
-{
-	phys_addr_t cursor, end;
-
-	end = base + size - 1;
-	memblock_dbg("%s: [%pa-%pa] %pS\n",
-		     __func__, &base, &end, (void *)_RET_IP_);
-	kmemleak_free_part_phys(base, size);
-	cursor = PFN_UP(base);
-	end = PFN_DOWN(base + size);
-
-	for (; cursor < end; cursor++) {
-		memblock_free_pages(cursor, 0);
-		totalram_pages_inc();
-	}
-}
-
 /*
  * Remaining API functions
  */
···
 	return end_pfn - start_pfn;
 }
 
+/*
+ * Initialised pages do not have PageReserved set. This function is called
+ * for each reserved range and marks the pages PageReserved.
+ * When deferred initialization of struct pages is enabled it also ensures
+ * that struct pages are properly initialised.
+ */
+static void __init memmap_init_reserved_range(phys_addr_t start,
+					      phys_addr_t end, int nid)
+{
+	unsigned long pfn;
+
+	for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
+		struct page *page = pfn_to_page(pfn);
+
+		init_deferred_page(pfn, nid);
+
+		/*
+		 * no need for atomic set_bit because the struct
+		 * page is not visible yet so nobody should
+		 * access it yet.
+		 */
+		__SetPageReserved(page);
+	}
+}
+
 static void __init memmap_init_reserved_pages(void)
 {
 	struct memblock_region *region;
···
 		end = start + region->size;
 
 		if (memblock_is_nomap(region))
-			reserve_bootmem_region(start, end, nid);
+			memmap_init_reserved_range(start, end, nid);
 
 		memblock_set_node(start, region->size, &memblock.reserved, nid);
 	}
···
 		if (!numa_valid_node(nid))
 			nid = early_pfn_to_nid(PFN_DOWN(start));
 
-		reserve_bootmem_region(start, end, nid);
+		memmap_init_reserved_range(start, end, nid);
 	}
 }
···
 		return 0;
 
 	start = phys_to_virt(map->start);
-	end = start + map->size - 1;
+	end = start + map->size;
 	snprintf(buf, sizeof(buf), "reserve_mem:%s", name);
 	free_reserved_area(start, end, 0, buf);
 	map->size = 0;
···
 	int len;
 
 	if (!p)
-		return -EINVAL;
+		goto err_param;
 
 	/* Check if there's room for more reserved memory */
-	if (reserved_mem_count >= RESERVE_MEM_MAX_ENTRIES)
+	if (reserved_mem_count >= RESERVE_MEM_MAX_ENTRIES) {
+		pr_err("reserve_mem: no more room for reserved memory\n");
 		return -EBUSY;
+	}
 
 	oldp = p;
 	size = memparse(p, &p);
 	if (!size || p == oldp)
-		return -EINVAL;
+		goto err_param;
 
 	if (*p != ':')
-		return -EINVAL;
+		goto err_param;
 
 	align = memparse(p+1, &p);
 	if (*p != ':')
-		return -EINVAL;
+		goto err_param;
 
 	/*
 	 * memblock_phys_alloc() doesn't like a zero size align,
···
 
 	/* name needs to have length but not too big */
 	if (!len || len >= RESERVE_MEM_NAME_SIZE)
-		return -EINVAL;
+		goto err_param;
 
 	/* Make sure that name has text */
 	for (p = name; *p; p++) {
···
 			break;
 	}
 	if (!*p)
-		return -EINVAL;
+		goto err_param;
 
 	/* Make sure the name is not already used */
-	if (reserve_mem_find_by_name(name, &start, &tmp))
+	if (reserve_mem_find_by_name(name, &start, &tmp)) {
+		pr_err("reserve_mem: name \"%s\" was already used\n", name);
 		return -EBUSY;
+	}
 
 	/* Pick previous allocations up from KHO if available */
 	if (reserve_mem_kho_revive(name, size, align))
···
 
 	/* TODO: Allocation must be outside of scratch region */
 	start = memblock_phys_alloc(size, align);
-	if (!start)
+	if (!start) {
+		pr_err("reserve_mem: memblock allocation failed\n");
 		return -ENOMEM;
+	}
 
 	reserved_mem_add(start, size, name);
 
 	return 1;
+err_param:
+	pr_err("reserve_mem: empty or malformed parameter\n");
+	return -EINVAL;
 }
 __setup("reserve_mem=", reserve_mem);
 
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
 static const char * const flagname[] = {
 	[ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG",
 	[ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
···
 }
 DEFINE_SHOW_ATTRIBUTE(memblock_debug);
 
-static int __init memblock_init_debugfs(void)
+static inline void memblock_debugfs_expose_arrays(struct dentry *root)
 {
-	struct dentry *root = debugfs_create_dir("memblock", NULL);
-
 	debugfs_create_file("memory", 0444, root,
 			    &memblock.memory, &memblock_debug_fops);
 	debugfs_create_file("reserved", 0444, root,
···
 	debugfs_create_file("physmem", 0444, root, &physmem,
 			    &memblock_debug_fops);
 #endif
+}
 
+#else
+
+static inline void memblock_debugfs_expose_arrays(struct dentry *root) { }
+
+#endif /* CONFIG_ARCH_KEEP_MEMBLOCK */
+
+static int memblock_reserve_mem_show(struct seq_file *m, void *private)
+{
+	struct reserve_mem_table *map;
+	char txtsz[16];
+
+	guard(mutex)(&reserve_mem_lock);
+	for (int i = 0; i < reserved_mem_count; i++) {
+		map = &reserved_mem_table[i];
+		if (!map->size)
+			continue;
+
+		memset(txtsz, 0, sizeof(txtsz));
+		string_get_size(map->size, 1, STRING_UNITS_2, txtsz, sizeof(txtsz));
+		seq_printf(m, "%s\t\t(%s)\n", map->name, txtsz);
+	}
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(memblock_reserve_mem);
+
+static int __init memblock_init_debugfs(void)
+{
+	struct dentry *root;
+
+	if (!IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !reserved_mem_count)
+		return 0;
+
+	root = debugfs_create_dir("memblock", NULL);
+
+	if (reserved_mem_count)
+		debugfs_create_file("reserve_mem_param", 0444, root, NULL,
+				    &memblock_reserve_mem_fops);
+
+	memblock_debugfs_expose_arrays(root);
 	return 0;
 }
 __initcall(memblock_init_debugfs);
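
Taken together, the reserve_mem changes in this file are visible end to end:
booting with a parameter such as reserve_mem=1M:4096:trace reserves a named
region, malformed parameters and failed allocations now produce pr_err()
output, and the new reserve_mem_param debugfs file lists what was actually
reserved. Consumers keep using the existing lookup API; a hedged sketch (the
"trace" name and the function are only examples):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/printk.h>

/* Look up a region created via the reserve_mem= kernel parameter. */
static int __init use_named_reservation(void)
{
	phys_addr_t start, size;

	if (!reserve_mem_find_by_name("trace", &start, &size))
		return -ENODEV;

	pr_info("reserve_mem \"trace\": start %pa, size %pa\n", &start, &size);
	return 0;
}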
-25
mm/mm_init.c
···
 	__init_deferred_page(pfn, nid);
 }
 
-/*
- * Initialised pages do not have PageReserved set. This function is
- * called for each range allocated by the bootmem allocator and
- * marks the pages PageReserved. The remaining valid pages are later
- * sent to the buddy page allocator.
- */
-void __meminit reserve_bootmem_region(phys_addr_t start,
-				      phys_addr_t end, int nid)
-{
-	unsigned long pfn;
-
-	for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
-		struct page *page = pfn_to_page(pfn);
-
-		__init_deferred_page(pfn, nid);
-
-		/*
-		 * no need for atomic set_bit because the struct
-		 * page is not visible yet so nobody should
-		 * access it yet.
-		 */
-		__SetPageReserved(page);
-	}
-}
-
 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
 static bool __meminit
 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
-46
mm/page_alloc.c
···
  */
 DEFINE_STATIC_KEY_TRUE(deferred_pages);
 
-static inline bool deferred_pages_enabled(void)
-{
-	return static_branch_unlikely(&deferred_pages);
-}
-
 /*
  * deferred_grow_zone() is __init, but it is called from
  * get_page_from_freelist() during early boot until deferred_pages permanently
···
 	return deferred_grow_zone(zone, order);
 }
 #else
-static inline bool deferred_pages_enabled(void)
-{
-	return false;
-}
-
 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
 {
 	return false;
···
 	setup_per_zone_lowmem_reserve();
 }
 EXPORT_SYMBOL(adjust_managed_page_count);
-
-unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
-{
-	void *pos;
-	unsigned long pages = 0;
-
-	start = (void *)PAGE_ALIGN((unsigned long)start);
-	end = (void *)((unsigned long)end & PAGE_MASK);
-	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
-		struct page *page = virt_to_page(pos);
-		void *direct_map_addr;
-
-		/*
-		 * 'direct_map_addr' might be different from 'pos'
-		 * because some architectures' virt_to_page()
-		 * work with aliases. Getting the direct map
-		 * address ensures that we get a _writeable_
-		 * alias for the memset().
-		 */
-		direct_map_addr = page_address(page);
-		/*
-		 * Perform a kasan-unchecked memset() since this memory
-		 * has not been initialized.
-		 */
-		direct_map_addr = kasan_reset_tag(direct_map_addr);
-		if ((unsigned int)poison <= 0xFF)
-			memset(direct_map_addr, poison, PAGE_SIZE);
-
-		free_reserved_page(page);
-	}
-
-	if (pages && s)
-		pr_info("Freeing %s memory: %ldK\n", s, K(pages));
-
-	return pages;
-}
 
 void free_reserved_page(struct page *page)
 {
+1 -2
tools/include/linux/mm.h
···
 
 #define __va(x) ((void *)((unsigned long)(x)))
 #define __pa(x) ((unsigned long)(x))
+#define __pa_symbol(x) ((unsigned long)(x))
 
 #define pfn_to_page(pfn) ((void *)((pfn) * PAGE_SIZE))
 
···
 {
 	return (phys_addr_t)address;
 }
-
-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid);
 
 static inline void totalram_pages_inc(void)
 {
+40 -3
tools/testing/memblock/internal.h
···
 
 #define pr_warn_ratelimited(fmt, ...) printf(fmt, ##__VA_ARGS__)
 
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
 bool mirrored_kernelcore = false;
 
 struct page {};
+
+static inline void *page_address(struct page *page)
+{
+	BUG();
+	return page;
+}
+
+static inline struct page *virt_to_page(void *virt)
+{
+	BUG();
+	return virt;
+}
 
 void memblock_free_pages(unsigned long pfn, unsigned int order)
 {
···
 {
 }
 
-static inline unsigned long free_reserved_area(void *start, void *end,
-					       int poison, const char *s)
+unsigned long free_reserved_area(void *start, void *end, int poison, const char *s);
+void free_reserved_page(struct page *page);
+
+static inline bool deferred_pages_enabled(void)
 {
-	return 0;
+	return false;
 }
+
+#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
+	for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
+
+static inline void *kasan_reset_tag(const void *addr)
+{
+	return (void *)addr;
+}
+
+static inline bool __is_kernel(unsigned long addr)
+{
+	return false;
+}
+
+static inline void init_deferred_page(unsigned long pfn, int nid)
+{
+}
+
+#define __SetPageReserved(p)	((void)(p))
 
 #endif
+10
tools/testing/memblock/linux/string_helpers.h
···
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_STRING_HELPERS_H_
+#define _LINUX_STRING_HELPERS_H_
+
+/*
+ * Header stub to avoid test build breakage; we don't need to
+ * actually implement string_get_size() as it's not used in the tests.
+ */
+
+#endif
-4
tools/testing/memblock/mmzone.c
···
 	return NULL;
 }
 
-void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid)
-{
-}
-
 void atomic_long_set(atomic_long_t *v, long i)
 {
 }