Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

memblock, treewide: make memblock_free() handle late freeing

It shouldn't be the responsibility of memblock users to detect whether the
memory they free was allocated from memblock late and, if so, to call
memblock_free_late() instead.

Make memblock_free() and memblock_phys_free() take care of late memory
freeing and drop memblock_free_late().

Link: https://patch.msgid.link/20260323074836.3653702-9-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

+31 -49
+1 -3
arch/sparc/kernel/mdesc.c
··· 183 183 static void __init mdesc_memblock_free(struct mdesc_handle *hp) 184 184 { 185 185 unsigned int alloc_size; 186 - unsigned long start; 187 186 188 187 BUG_ON(refcount_read(&hp->refcnt) != 0); 189 188 BUG_ON(!list_empty(&hp->list)); 190 189 191 190 alloc_size = PAGE_ALIGN(hp->handle_size); 192 - start = __pa(hp); 193 - memblock_free_late(start, alloc_size); 191 + memblock_free(hp, alloc_size); 194 192 } 195 193 196 194 static struct mdesc_mem_ops memblock_mdesc_ops = {
+1 -1
arch/x86/kernel/setup.c
··· 426 426 if (!ima_kexec_buffer_size) 427 427 return -ENOENT; 428 428 429 - memblock_free_late(ima_kexec_buffer_phys, 429 + memblock_phys_free(ima_kexec_buffer_phys, 430 430 ima_kexec_buffer_size); 431 431 432 432 ima_kexec_buffer_phys = 0;
+1 -4
arch/x86/platform/efi/memmap.c
··· 34 34 void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags) 35 35 { 36 36 if (flags & EFI_MEMMAP_MEMBLOCK) { 37 - if (slab_is_available()) 38 - memblock_free_late(phys, size); 39 - else 40 - memblock_phys_free(phys, size); 37 + memblock_phys_free(phys, size); 41 38 } else if (flags & EFI_MEMMAP_SLAB) { 42 39 struct page *p = pfn_to_page(PHYS_PFN(phys)); 43 40 unsigned int order = get_order(size);
+1 -1
arch/x86/platform/efi/quirks.c
··· 372 372 * doesn't make sense as far as the firmware is 373 373 * concerned, but it does provide us with a way to tag 374 374 * those regions that must not be paired with 375 - * memblock_free_late(). 375 + * memblock_phys_free(). 376 376 */ 377 377 md->attribute |= EFI_MEMORY_RUNTIME; 378 378 }
+1 -1
drivers/firmware/efi/apple-properties.c
··· 226 226 */ 227 227 data->len = 0; 228 228 memunmap(data); 229 - memblock_free_late(pa_data + sizeof(*data), data_len); 229 + memblock_phys_free(pa_data + sizeof(*data), data_len); 230 230 231 231 return ret; 232 232 }
+1 -1
drivers/of/kexec.c
··· 175 175 if (ret) 176 176 return ret; 177 177 178 - memblock_free_late(addr, size); 178 + memblock_phys_free(addr, size); 179 179 return 0; 180 180 } 181 181 #endif
-2
include/linux/memblock.h
··· 172 172 struct memblock_type *type_b, phys_addr_t *out_start, 173 173 phys_addr_t *out_end, int *out_nid); 174 174 175 - void memblock_free_late(phys_addr_t base, phys_addr_t size); 176 - 177 175 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 178 176 static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, 179 177 phys_addr_t *out_start,
+3 -3
kernel/dma/swiotlb.c
··· 546 546 free_pages(tbl_vaddr, get_order(tbl_size)); 547 547 free_pages((unsigned long)mem->slots, get_order(slots_size)); 548 548 } else { 549 - memblock_free_late(__pa(mem->areas), 549 + memblock_free(mem->areas, 550 550 array_size(sizeof(*mem->areas), mem->nareas)); 551 - memblock_free_late(mem->start, tbl_size); 552 - memblock_free_late(__pa(mem->slots), slots_size); 551 + memblock_phys_free(mem->start, tbl_size); 552 + memblock_free(mem->slots, slots_size); 553 553 } 554 554 555 555 memset(mem, 0, sizeof(*mem));
+1 -1
lib/bootconfig.c
··· 64 64 if (early) 65 65 memblock_free(addr, size); 66 66 else if (addr) 67 - memblock_free_late(__pa(addr), size); 67 + memblock_free(addr, size); 68 68 } 69 69 70 70 #else /* !__KERNEL__ */
+2 -2
mm/kfence/core.c
··· 731 731 * fails for the first page, and therefore expect addr==__kfence_pool in 732 732 * most failure cases. 733 733 */ 734 - memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); 734 + memblock_free((void *)addr, KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); 735 735 __kfence_pool = NULL; 736 736 737 - memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE); 737 + memblock_free(kfence_metadata_init, KFENCE_METADATA_SIZE); 738 738 kfence_metadata_init = NULL; 739 739 740 740 return false;
+19 -30
mm/memblock.c
··· 385 385 */ 386 386 void __init memblock_discard(void) 387 387 { 388 - phys_addr_t addr, size; 388 + phys_addr_t size; 389 + void *addr; 389 390 390 391 if (memblock.reserved.regions != memblock_reserved_init_regions) { 391 - addr = __pa(memblock.reserved.regions); 392 + addr = memblock.reserved.regions; 392 393 size = PAGE_ALIGN(sizeof(struct memblock_region) * 393 394 memblock.reserved.max); 394 395 if (memblock_reserved_in_slab) 395 - kfree(memblock.reserved.regions); 396 + kfree(addr); 396 397 else 397 - memblock_free_late(addr, size); 398 + memblock_free(addr, size); 398 399 } 399 400 400 401 if (memblock.memory.regions != memblock_memory_init_regions) { 401 - addr = __pa(memblock.memory.regions); 402 + addr = memblock.memory.regions; 402 403 size = PAGE_ALIGN(sizeof(struct memblock_region) * 403 404 memblock.memory.max); 404 405 if (memblock_memory_in_slab) 405 - kfree(memblock.memory.regions); 406 + kfree(addr); 406 407 else 407 - memblock_free_late(addr, size); 408 + memblock_free(addr, size); 408 409 } 409 410 410 411 memblock_memory = NULL; ··· 963 962 * @size: size of the boot memory block in bytes 964 963 * 965 964 * Free boot memory block previously allocated by memblock_alloc_xx() API. 966 - * The freeing memory will not be released to the buddy allocator. 965 + * If called after the buddy allocator is available, the memory is released to 966 + * the buddy allocator. 967 967 */ 968 968 void __init_memblock memblock_free(void *ptr, size_t size) 969 969 { ··· 978 976 * @size: size of the boot memory block in bytes 979 977 * 980 978 * Free boot memory block previously allocated by memblock_phys_alloc_xx() API. 981 - * The freeing memory will not be released to the buddy allocator. 979 + * If called after the buddy allocator is available, the memory is released to 980 + * the buddy allocator. 
982 981 */ 983 982 int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size) 984 983 { 985 984 phys_addr_t end = base + size - 1; 985 + int ret; 986 986 987 987 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, 988 988 &base, &end, (void *)_RET_IP_); 989 989 990 990 kmemleak_free_part_phys(base, size); 991 - return memblock_remove_range(&memblock.reserved, base, size); 991 + ret = memblock_remove_range(&memblock.reserved, base, size); 992 + 993 + if (slab_is_available()) 994 + __free_reserved_area(base, base + size, -1); 995 + 996 + return ret; 992 997 } 993 998 994 999 int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size, ··· 1821 1812 if (unlikely(!addr)) 1822 1813 panic("%s: Failed to allocate %pap bytes\n", func, &size); 1823 1814 return addr; 1824 - } 1825 - 1826 - /** 1827 - * memblock_free_late - free pages directly to buddy allocator 1828 - * @base: phys starting address of the boot memory block 1829 - * @size: size of the boot memory block in bytes 1830 - * 1831 - * This is only useful when the memblock allocator has already been torn 1832 - * down, but we are still initializing the system. Pages are released directly 1833 - * to the buddy allocator. 1834 - */ 1835 - void __init memblock_free_late(phys_addr_t base, phys_addr_t size) 1836 - { 1837 - phys_addr_t end = base + size - 1; 1838 - 1839 - memblock_dbg("%s: [%pa-%pa] %pS\n", 1840 - __func__, &base, &end, (void *)_RET_IP_); 1841 - 1842 - kmemleak_free_part_phys(base, size); 1843 - __free_reserved_area(base, base + size, -1); 1844 1815 } 1845 1816 1846 1817 /*