Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
"18 patches.

Subsystems affected by this patch series: mm (pagealloc, memcg, kasan,
memory-failure, and highmem), ubsan, proc, and MAINTAINERS"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
MAINTAINERS: add a couple more files to the Clang/LLVM section
proc_sysctl: fix oops caused by incorrect command parameters
powerpc/mm/highmem: use __set_pte_at() for kmap_local()
mips/mm/highmem: use set_pte() for kmap_local()
mm/highmem: prepare for overriding set_pte_at()
sparc/mm/highmem: flush cache and TLB
mm: fix page reference leak in soft_offline_page()
ubsan: disable unsigned-overflow check for i386
kasan, mm: fix resetting page_alloc tags for HW_TAGS
kasan, mm: fix conflicts with init_on_alloc/free
kasan: fix HW_TAGS boot parameters
kasan: fix incorrect arguments passing in kasan_add_zero_shadow
kasan: fix unaligned address is unhandled in kasan_remove_zero_shadow
mm: fix numa stats for thp migration
mm: memcg: fix memcg file_dirty numa stat
mm: memcg/slab: optimize objcg stock draining
mm: fix initialization of struct page for holes in memory layout
x86/setup: don't remove E820_TYPE_RAM for pfn 0

+168 -148
+6 -21
Documentation/dev-tools/kasan.rst
···
 boot parameters that allow to disable KASAN competely or otherwise control
 particular KASAN features.
 
-The things that can be controlled are:
+- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
 
-1. Whether KASAN is enabled at all.
-2. Whether KASAN collects and saves alloc/free stacks.
-3. Whether KASAN panics on a detected bug or not.
+- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
+  traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
+  ``off``).
 
-The ``kasan.mode`` boot parameter allows to choose one of three main modes:
-
-- ``kasan.mode=off`` - KASAN is disabled, no tag checks are performed
-- ``kasan.mode=prod`` - only essential production features are enabled
-- ``kasan.mode=full`` - all KASAN features are enabled
-
-The chosen mode provides default control values for the features mentioned
-above. However it's also possible to override the default values by providing:
-
-- ``kasan.stacktrace=off`` or ``=on`` - enable alloc/free stack collection
-                                        (default: ``on`` for ``mode=full``,
-                                        otherwise ``off``)
-- ``kasan.fault=report`` or ``=panic`` - only print KASAN report or also panic
-                                         (default: ``report``)
-
-If ``kasan.mode`` parameter is not provided, it defaults to ``full`` when
-``CONFIG_DEBUG_KERNEL`` is enabled, and to ``prod`` otherwise.
+- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
+  report or also panic the kernel (default: ``report``).
 
 For developers
 ~~~~~~~~~~~~~~
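To make the new semantics concrete, here is a usage illustration based only on the parameters documented above (the particular combination is an example, not something prescribed by this series): a debug boot might pass

    kasan=on kasan.stacktrace=on kasan.fault=panic

on the kernel command line, while kasan=off disables the checks entirely; the old kasan.mode=off/prod/full values no longer exist after this change.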
+2
MAINTAINERS
···
 B:	https://github.com/ClangBuiltLinux/linux/issues
 C:	irc://chat.freenode.net/clangbuiltlinux
 F:	Documentation/kbuild/llvm.rst
+F:	include/linux/compiler-clang.h
 F:	scripts/clang-tools/
+F:	scripts/clang-version.sh
 F:	scripts/lld-version.sh
 K:	\b(?i:clang|llvm)\b
+1
arch/mips/include/asm/highmem.h
···
 
 #define flush_cache_kmaps()	BUG_ON(cpu_has_dc_aliases)
 
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	set_pte(ptep, ptev)
 #define arch_kmap_local_post_map(vaddr, pteval)	local_flush_tlb_one(vaddr)
 #define arch_kmap_local_post_unmap(vaddr)	local_flush_tlb_one(vaddr)
 
+2
arch/powerpc/include/asm/highmem.h
···
 
 #define flush_cache_kmaps()	flush_cache_all()
 
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
+	__set_pte_at(mm, vaddr, ptep, ptev, 1)
 #define arch_kmap_local_post_map(vaddr, pteval)	\
 	local_flush_tlb_page(NULL, vaddr)
 #define arch_kmap_local_post_unmap(vaddr)	\
+5 -4
arch/sparc/include/asm/highmem.h
···
 
 #define flush_cache_kmaps()	flush_cache_all()
 
-/* FIXME: Use __flush_tlb_one(vaddr) instead of flush_cache_all() -- Anton */
-#define arch_kmap_local_post_map(vaddr, pteval)	flush_cache_all()
-#define arch_kmap_local_post_unmap(vaddr)	flush_cache_all()
-
+/* FIXME: Use __flush_*_one(vaddr) instead of flush_*_all() -- Anton */
+#define arch_kmap_local_pre_map(vaddr, pteval)	flush_cache_all()
+#define arch_kmap_local_pre_unmap(vaddr)	flush_cache_all()
+#define arch_kmap_local_post_map(vaddr, pteval)	flush_tlb_all()
+#define arch_kmap_local_post_unmap(vaddr)	flush_tlb_all()
 
 #endif /* __KERNEL__ */
 
+9 -11
arch/x86/kernel/setup.c
···
 static void __init trim_bios_range(void)
 {
 	/*
-	 * A special case is the first 4Kb of memory;
-	 * This is a BIOS owned area, not kernel ram, but generally
-	 * not listed as such in the E820 table.
-	 *
-	 * This typically reserves additional memory (64KiB by default)
-	 * since some BIOSes are known to corrupt low memory. See the
-	 * Kconfig help text for X86_RESERVE_LOW.
-	 */
-	e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
-
-	/*
 	 * special case: Some BIOSes report the PC BIOS
 	 * area (640Kb -> 1Mb) as RAM even though it is not.
 	 * take them out.
···
 
 static void __init trim_low_memory_range(void)
 {
+	/*
+	 * A special case is the first 4Kb of memory;
+	 * This is a BIOS owned area, not kernel ram, but generally
+	 * not listed as such in the E820 table.
+	 *
+	 * This typically reserves additional memory (64KiB by default)
+	 * since some BIOSes are known to corrupt low memory. See the
+	 * Kconfig help text for X86_RESERVE_LOW.
+	 */
 	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
 }
 
+6 -1
fs/proc/proc_sysctl.c
···
 		return 0;
 	}
 
+	if (!val)
+		return -EINVAL;
+	len = strlen(val);
+	if (len == 0)
+		return -EINVAL;
+
 	/*
 	 * To set sysctl options, we use a temporary mount of proc, look up the
 	 * respective sys/ file and write to it. To avoid mounting it when no
···
 			file, param, val);
 		goto out;
 	}
-	len = strlen(val);
 	wret = kernel_write(file, val, len, &pos);
 	if (wret < 0) {
 		err = wret;
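For illustration (the sysctl name here is an example, not taken from this patch): boot-time sysctls are passed as sysctl.<path>=<value>, so an entry such as sysctl.vm.swappiness=10 is written to the corresponding /proc/sys file during early boot, as the context above describes. A malformed entry, presumably something like sysctl.vm.swappiness with the =value part missing (val is NULL) or sysctl.vm.swappiness= (empty value), previously fell through to strlen()/kernel_write(); with the checks added above both forms are now rejected early with -EINVAL instead of oopsing.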
+1
lib/Kconfig.ubsan
···
 config UBSAN_UNSIGNED_OVERFLOW
 	bool "Perform checking for unsigned arithmetic overflow"
 	depends on $(cc-option,-fsanitize=unsigned-integer-overflow)
+	depends on !X86_32	# avoid excessive stack usage on x86-32/clang
 	help
 	  This option enables -fsanitize=unsigned-integer-overflow which checks
 	  for overflow of any arithmetic operations with unsigned integers. This
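For reference, this is the class of wrap-around the sanitizer reports, and why the option is gated on a cc-option test (only compilers that accept -fsanitize=unsigned-integer-overflow, such as clang, can build it). A tiny userspace illustration, not kernel code:

    /* demo.c: compile with  clang -fsanitize=unsigned-integer-overflow demo.c */
    #include <stdio.h>

    int main(void)
    {
            unsigned int x = 0;

            x -= 1;         /* wraps to UINT_MAX; the sanitizer reports the wrap */
            printf("%u\n", x);
            return 0;
    }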
+6 -1
mm/highmem.c
···
 }
 #endif
 
+#ifndef arch_kmap_local_set_pte
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
+	set_pte_at(mm, vaddr, ptep, ptev)
+#endif
+
 /* Unmap a local mapping which was obtained by kmap_high_get() */
 static inline bool kmap_high_unmap_local(unsigned long vaddr)
 {
···
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte - idx)));
 	pteval = pfn_pte(pfn, prot);
-	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
+	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
 	arch_kmap_local_post_map(vaddr, pteval);
 	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
 	preempt_enable();
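Reading the mm/highmem.c hunk together with the arch hunks above: the generic code now installs kmap_local PTEs through arch_kmap_local_set_pte() and only falls back to set_pte_at() when an architecture does not define the macro in its asm/highmem.h, as mips and powerpc now do. A minimal sketch of such an override (the helper name is hypothetical, not from this series):

    /* Hypothetical <asm/highmem.h> fragment: route kmap_local PTE installation
     * through an arch-specific setter instead of the generic set_pte_at(). */
    #define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
            my_arch_set_kmap_pte(ptep, ptev)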
+32 -45
mm/kasan/hw_tags.c
···
 
 #include "kasan.h"
 
-enum kasan_arg_mode {
-	KASAN_ARG_MODE_DEFAULT,
-	KASAN_ARG_MODE_OFF,
-	KASAN_ARG_MODE_PROD,
-	KASAN_ARG_MODE_FULL,
+enum kasan_arg {
+	KASAN_ARG_DEFAULT,
+	KASAN_ARG_OFF,
+	KASAN_ARG_ON,
 };
 
 enum kasan_arg_stacktrace {
···
 	KASAN_ARG_FAULT_PANIC,
 };
 
-static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
+static enum kasan_arg kasan_arg __ro_after_init;
 static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
 static enum kasan_arg_fault kasan_arg_fault __ro_after_init;
 
···
 /* Whether panic or disable tag checking on fault. */
 bool kasan_flag_panic __ro_after_init;
 
-/* kasan.mode=off/prod/full */
-static int __init early_kasan_mode(char *arg)
+/* kasan=off/on */
+static int __init early_kasan_flag(char *arg)
 {
 	if (!arg)
 		return -EINVAL;
 
 	if (!strcmp(arg, "off"))
-		kasan_arg_mode = KASAN_ARG_MODE_OFF;
-	else if (!strcmp(arg, "prod"))
-		kasan_arg_mode = KASAN_ARG_MODE_PROD;
-	else if (!strcmp(arg, "full"))
-		kasan_arg_mode = KASAN_ARG_MODE_FULL;
+		kasan_arg = KASAN_ARG_OFF;
+	else if (!strcmp(arg, "on"))
+		kasan_arg = KASAN_ARG_ON;
 	else
 		return -EINVAL;
 
 	return 0;
 }
-early_param("kasan.mode", early_kasan_mode);
+early_param("kasan", early_kasan_flag);
 
-/* kasan.stack=off/on */
+/* kasan.stacktrace=off/on */
 static int __init early_kasan_flag_stacktrace(char *arg)
 {
 	if (!arg)
···
 	 * as this function is only called for MTE-capable hardware.
 	 */
 
-	/* If KASAN is disabled, do nothing. */
-	if (kasan_arg_mode == KASAN_ARG_MODE_OFF)
+	/* If KASAN is disabled via command line, don't initialize it. */
+	if (kasan_arg == KASAN_ARG_OFF)
 		return;
 
 	hw_init_tags(KASAN_TAG_MAX);
···
 /* kasan_init_hw_tags() is called once on boot CPU. */
 void __init kasan_init_hw_tags(void)
 {
-	/* If hardware doesn't support MTE, do nothing. */
+	/* If hardware doesn't support MTE, don't initialize KASAN. */
 	if (!system_supports_mte())
 		return;
 
-	/* Choose KASAN mode if kasan boot parameter is not provided. */
-	if (kasan_arg_mode == KASAN_ARG_MODE_DEFAULT) {
-		if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
-			kasan_arg_mode = KASAN_ARG_MODE_FULL;
-		else
-			kasan_arg_mode = KASAN_ARG_MODE_PROD;
-	}
-
-	/* Preset parameter values based on the mode. */
-	switch (kasan_arg_mode) {
-	case KASAN_ARG_MODE_DEFAULT:
-		/* Shouldn't happen as per the check above. */
-		WARN_ON(1);
+	/* If KASAN is disabled via command line, don't initialize it. */
+	if (kasan_arg == KASAN_ARG_OFF)
 		return;
-	case KASAN_ARG_MODE_OFF:
-		/* If KASAN is disabled, do nothing. */
-		return;
-	case KASAN_ARG_MODE_PROD:
-		static_branch_enable(&kasan_flag_enabled);
-		break;
-	case KASAN_ARG_MODE_FULL:
-		static_branch_enable(&kasan_flag_enabled);
-		static_branch_enable(&kasan_flag_stacktrace);
-		break;
-	}
 
-	/* Now, optionally override the presets. */
+	/* Enable KASAN. */
+	static_branch_enable(&kasan_flag_enabled);
 
 	switch (kasan_arg_stacktrace) {
 	case KASAN_ARG_STACKTRACE_DEFAULT:
+		/*
+		 * Default to enabling stack trace collection for
+		 * debug kernels.
+		 */
+		if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
+			static_branch_enable(&kasan_flag_stacktrace);
 		break;
 	case KASAN_ARG_STACKTRACE_OFF:
-		static_branch_disable(&kasan_flag_stacktrace);
+		/* Do nothing, kasan_flag_stacktrace keeps its default value. */
 		break;
 	case KASAN_ARG_STACKTRACE_ON:
 		static_branch_enable(&kasan_flag_stacktrace);
···
 
 	switch (kasan_arg_fault) {
 	case KASAN_ARG_FAULT_DEFAULT:
+		/*
+		 * Default to no panic on report.
+		 * Do nothing, kasan_flag_panic keeps its default value.
+		 */
 		break;
 	case KASAN_ARG_FAULT_REPORT:
-		kasan_flag_panic = false;
+		/* Do nothing, kasan_flag_panic keeps its default value. */
 		break;
 	case KASAN_ARG_FAULT_PANIC:
+		/* Enable panic on report. */
 		kasan_flag_panic = true;
 		break;
 	}
+13 -10
mm/kasan/init.c
···
 
 	if (kasan_pte_table(*pmd)) {
 		if (IS_ALIGNED(addr, PMD_SIZE) &&
-		    IS_ALIGNED(next, PMD_SIZE))
+		    IS_ALIGNED(next, PMD_SIZE)) {
 			pmd_clear(pmd);
-		continue;
+			continue;
+		}
 	}
 	pte = pte_offset_kernel(pmd, addr);
 	kasan_remove_pte_table(pte, addr, next);
···
 
 	if (kasan_pmd_table(*pud)) {
 		if (IS_ALIGNED(addr, PUD_SIZE) &&
-		    IS_ALIGNED(next, PUD_SIZE))
+		    IS_ALIGNED(next, PUD_SIZE)) {
 			pud_clear(pud);
-		continue;
+			continue;
+		}
 	}
 	pmd = pmd_offset(pud, addr);
 	pmd_base = pmd_offset(pud, 0);
···
 
 	if (kasan_pud_table(*p4d)) {
 		if (IS_ALIGNED(addr, P4D_SIZE) &&
-		    IS_ALIGNED(next, P4D_SIZE))
+		    IS_ALIGNED(next, P4D_SIZE)) {
 			p4d_clear(p4d);
-		continue;
+			continue;
+		}
 	}
 	pud = pud_offset(p4d, addr);
 	kasan_remove_pud_table(pud, addr, next);
···
 
 	if (kasan_p4d_table(*pgd)) {
 		if (IS_ALIGNED(addr, PGDIR_SIZE) &&
-		    IS_ALIGNED(next, PGDIR_SIZE))
+		    IS_ALIGNED(next, PGDIR_SIZE)) {
 			pgd_clear(pgd);
-		continue;
+			continue;
+		}
 	}
 
 	p4d = p4d_offset(pgd, addr);
···
 
 	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
 	if (ret)
-		kasan_remove_zero_shadow(shadow_start,
-					size >> KASAN_SHADOW_SCALE_SHIFT);
+		kasan_remove_zero_shadow(start, size);
 	return ret;
 }
+1 -3
mm/memcontrol.c
···
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
 		page_counter_uncharge(&memcg->kmem, nr_pages);
 
-	page_counter_uncharge(&memcg->memory, nr_pages);
-	if (do_memsw_account())
-		page_counter_uncharge(&memcg->memsw, nr_pages);
+	refill_stock(memcg, nr_pages);
 }
 
 /**
+16 -4
mm/memory-failure.c
···
 	return rc;
 }
 
+static void put_ref_page(struct page *page)
+{
+	if (page)
+		put_page(page);
+}
+
 /**
  * soft_offline_page - Soft offline a page.
  * @pfn: pfn to soft-offline
···
 int soft_offline_page(unsigned long pfn, int flags)
 {
 	int ret;
-	struct page *page;
 	bool try_again = true;
+	struct page *page, *ref_page = NULL;
+
+	WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));
 
 	if (!pfn_valid(pfn))
 		return -ENXIO;
+	if (flags & MF_COUNT_INCREASED)
+		ref_page = pfn_to_page(pfn);
+
 	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
 	page = pfn_to_online_page(pfn);
-	if (!page)
+	if (!page) {
+		put_ref_page(ref_page);
 		return -EIO;
+	}
 
 	if (PageHWPoison(page)) {
 		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
-		if (flags & MF_COUNT_INCREASED)
-			put_page(page);
+		put_ref_page(ref_page);
 		return 0;
 	}
 
+12 -11
mm/migrate.c
···
 	struct zone *oldzone, *newzone;
 	int dirty;
 	int expected_count = expected_page_refs(mapping, page) + extra_count;
+	int nr = thp_nr_pages(page);
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
···
 	 */
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
-	page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
+	page_ref_add(newpage, nr); /* add cache reference */
 	if (PageSwapBacked(page)) {
 		__SetPageSwapBacked(newpage);
 		if (PageSwapCache(page)) {
···
 	if (PageTransHuge(page)) {
 		int i;
 
-		for (i = 1; i < HPAGE_PMD_NR; i++) {
+		for (i = 1; i < nr; i++) {
 			xas_next(&xas);
 			xas_store(&xas, newpage);
 		}
···
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
+	page_ref_unfreeze(page, expected_count - nr);
 
 	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
···
 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-		__dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
-		__inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
+		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
-			__dec_lruvec_state(old_lruvec, NR_SHMEM);
-			__inc_lruvec_state(new_lruvec, NR_SHMEM);
+			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 		}
 		if (dirty && mapping_can_writeback(mapping)) {
-			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
-			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
-			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
-			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 		}
 	}
 	local_irq_enable();
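For scale: on a configuration with 4 KiB base pages and 2 MiB PMD-sized THPs, thp_nr_pages() returns 512 for such a page, so migrating one now moves NR_FILE_PAGES, NR_SHMEM and, when applicable, the dirty counters by 512 instead of by 1, which is the NUMA-stats skew the patch title refers to.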
+52 -34
mm/page_alloc.c
···
 	/* s390's use of memset() could override KASAN redzones. */
 	kasan_disable_current();
 	for (i = 0; i < numpages; i++) {
+		u8 tag = page_kasan_tag(page + i);
 		page_kasan_tag_reset(page + i);
 		clear_highpage(page + i);
+		page_kasan_tag_set(page + i, tag);
 	}
 	kasan_enable_current();
 }
···
  * Initialize all valid struct pages in the range [spfn, epfn) and mark them
  * PageReserved(). Return the number of struct pages that were initialized.
  */
-static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
+static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
+					 int zone, int nid)
 {
-	unsigned long pfn;
+	unsigned long pfn, zone_spfn, zone_epfn;
 	u64 pgcnt = 0;
+
+	zone_spfn = arch_zone_lowest_possible_pfn[zone];
+	zone_epfn = arch_zone_highest_possible_pfn[zone];
+
+	spfn = clamp(spfn, zone_spfn, zone_epfn);
+	epfn = clamp(epfn, zone_spfn, zone_epfn);
 
 	for (pfn = spfn; pfn < epfn; pfn++) {
 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
···
 				+ pageblock_nr_pages - 1;
 			continue;
 		}
-		/*
-		 * Use a fake node/zone (0) for now. Some of these pages
-		 * (in memblock.reserved but not in memblock.memory) will
-		 * get re-initialized via reserve_bootmem_region() later.
-		 */
-		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
+
+		__init_single_page(pfn_to_page(pfn), pfn, zone, nid);
 		__SetPageReserved(pfn_to_page(pfn));
 		pgcnt++;
 	}
···
 }
 
 /*
- * Only struct pages that are backed by physical memory are zeroed and
- * initialized by going through __init_single_page(). But, there are some
- * struct pages which are reserved in memblock allocator and their fields
- * may be accessed (for example page_to_pfn() on some configuration accesses
- * flags). We must explicitly initialize those struct pages.
+ * Only struct pages that correspond to ranges defined by memblock.memory
+ * are zeroed and initialized by going through __init_single_page() during
+ * memmap_init().
  *
- * This function also addresses a similar issue where struct pages are left
- * uninitialized because the physical address range is not covered by
- * memblock.memory or memblock.reserved. That could happen when memblock
- * layout is manually configured via memmap=, or when the highest physical
- * address (max_pfn) does not end on a section boundary.
+ * But, there could be struct pages that correspond to holes in
+ * memblock.memory. This can happen because of the following reasons:
+ *  - phyiscal memory bank size is not necessarily the exact multiple of the
+ *    arbitrary section size
+ *  - early reserved memory may not be listed in memblock.memory
+ *  - memory layouts defined with memmap= kernel parameter may not align
+ *    nicely with memmap sections
+ *
+ * Explicitly initialize those struct pages so that:
+ *  - PG_Reserved is set
+ *  - zone link is set accorging to the architecture constrains
+ *  - node is set to node id of the next populated region except for the
+ *    trailing hole where last node id is used
  */
-static void __init init_unavailable_mem(void)
+static void __init init_zone_unavailable_mem(int zone)
 {
-	phys_addr_t start, end;
-	u64 i, pgcnt;
-	phys_addr_t next = 0;
+	unsigned long start, end;
+	int i, nid;
+	u64 pgcnt;
+	unsigned long next = 0;
 
 	/*
-	 * Loop through unavailable ranges not covered by memblock.memory.
+	 * Loop through holes in memblock.memory and initialize struct
+	 * pages corresponding to these holes
 	 */
 	pgcnt = 0;
-	for_each_mem_range(i, &start, &end) {
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
 		if (next < start)
-			pgcnt += init_unavailable_range(PFN_DOWN(next),
-							PFN_UP(start));
+			pgcnt += init_unavailable_range(next, start, zone, nid);
 		next = end;
 	}
 
 	/*
-	 * Early sections always have a fully populated memmap for the whole
-	 * section - see pfn_valid(). If the last section has holes at the
-	 * end and that section is marked "online", the memmap will be
-	 * considered initialized. Make sure that memmap has a well defined
-	 * state.
+	 * Last section may surpass the actual end of memory (e.g. we can
+	 * have 1Gb section and 512Mb of RAM pouplated).
+	 * Make sure that memmap has a well defined state in this case.
 	 */
-	pgcnt += init_unavailable_range(PFN_DOWN(next),
-					round_up(max_pfn, PAGES_PER_SECTION));
+	end = round_up(max_pfn, PAGES_PER_SECTION);
+	pgcnt += init_unavailable_range(next, end, zone, nid);
 
 	/*
 	 * Struct pages that do not have backing memory. This could be because
 	 * firmware is using some of this memory, or for some other reasons.
 	 */
 	if (pgcnt)
-		pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
+		pr_info("Zone %s: zeroed struct page in unavailable ranges: %lld pages", zone_names[zone], pgcnt);
+}
+
+static void __init init_unavailable_mem(void)
+{
+	int zone;
+
+	for (zone = 0; zone < ZONE_MOVABLE; zone++)
+		init_zone_unavailable_mem(zone);
 }
 #else
 static inline void __init init_unavailable_mem(void)
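As a worked example of the scenario named in the comment above (illustrative numbers, assuming 4 KiB pages): with 512 MiB of RAM inside a 1 GiB memory section, pfns 0x20000 through 0x3ffff fall in the trailing hole, so roughly 131072 struct pages past the end of RAM now get initialized as reserved and linked to the zone and node of the last populated region, rather than being left with the fake node/zone 0 that the removed comment described.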
+4 -3
mm/slub.c
···
 					   void *obj)
 {
 	if (unlikely(slab_want_init_on_free(s)) && obj)
-		memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
+			0, sizeof(void *));
 }
 
 /*
···
 		stat(s, ALLOC_FASTPATH);
 	}
 
-	maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
+	maybe_wipe_obj_freeptr(s, object);
 
 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
 		memset(kasan_reset_tag(object), 0, s->object_size);
···
 		int j;
 
 		for (j = 0; j < i; j++)
-			memset(p[j], 0, s->object_size);
+			memset(kasan_reset_tag(p[j]), 0, s->object_size);
 	}
 
 	/* memcg and kmem_cache debug support */