Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'memblock-v7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock

Pull memblock updates from Mike Rapoport:

- update tools/include/linux/mm.h to fix memblock tests compilation

- drop redundant struct page* parameter from memblock_free_pages() and
get struct page from the pfn

- add underflow detection for size calculation in memtest and warn
about underflow when CONFIG_DEBUG_VM is enabled

* tag 'memblock-v7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
mm/memtest: add underflow detection for size calculation
memblock: drop redundant 'struct page *' argument from memblock_free_pages()
memblock test: include <linux/sizes.h> from tools mm.h stub

+10 -8
+1 -2
mm/internal.h
···
809  809      extern int __isolate_free_page(struct page *page, unsigned int order);
810  810      extern void __putback_isolated_page(struct page *page, unsigned int order,
811  811                                          int mt);
812       -   extern void memblock_free_pages(struct page *page, unsigned long pfn,
813       -                                   unsigned int order);
     812  +   extern void memblock_free_pages(unsigned long pfn, unsigned int order);
814  813      extern void __free_pages_core(struct page *page, unsigned int order,
815  814                                    enum meminit_context context);
816  815
+2 -2
mm/memblock.c
···
1772 1772     end = PFN_DOWN(base + size);
1773 1773
1774 1774     for (; cursor < end; cursor++) {
1775      -       memblock_free_pages(pfn_to_page(cursor), cursor, 0);
     1775 +       memblock_free_pages(cursor, 0);
1776 1776         totalram_pages_inc();
1777 1777     }
1778 1778 }
···
2217 2217     while (start + (1UL << order) > end)
2218 2218         order--;
2219 2219
2220      -   memblock_free_pages(pfn_to_page(start), start, order);
     2220 +   memblock_free_pages(start, order);
2221 2221
2222 2222     start += (1UL << order);
2223 2223 }
+2
mm/memtest.c
···
50   50      start_bad = 0;
51   51      last_bad = 0;
52   52
     53  +   VM_WARN_ON_ONCE(size < start_phys_aligned - start_phys);
     54  +
53   55      for (p = start; p < end; p++)
54   56          WRITE_ONCE(*p, pattern);
55   57
+3 -2
mm/mm_init.c
···
2474 2474     return table;
2475 2475 }
2476 2476
2477      - void __init memblock_free_pages(struct page *page, unsigned long pfn,
2478      -                                 unsigned int order)
     2477 + void __init memblock_free_pages(unsigned long pfn, unsigned int order)
2479 2478 {
     2479 +     struct page *page = pfn_to_page(pfn);
     2480 +
2480 2481     if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
2481 2482         int nid = early_pfn_to_nid(pfn);
2482 2483
+1
tools/include/linux/mm.h
···
4    4
5    5    #include <linux/align.h>
6    6    #include <linux/mmzone.h>
     7  + #include <linux/sizes.h>
7    8
8    9    #define PAGE_SHIFT 12
9    10   #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+1 -2
tools/testing/memblock/internal.h
···
15   15
16   16   struct page {};
17   17
18        - void memblock_free_pages(struct page *page, unsigned long pfn,
19        -                          unsigned int order)
     18   + void memblock_free_pages(unsigned long pfn, unsigned int order)
20   19   {
21   20   }
22   21