Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
"2 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
hugetlbfs: fix races and page leaks during migration
kasan: turn off asan-stack for clang-8 and earlier

+59 -4
+12
fs/hugetlbfs/inode.c
··· 859 859 rc = migrate_huge_page_move_mapping(mapping, newpage, page); 860 860 if (rc != MIGRATEPAGE_SUCCESS) 861 861 return rc; 862 + 863 + /* 864 + * page_private is subpool pointer in hugetlb pages. Transfer to 865 + * new page. PagePrivate is not associated with page_private for 866 + * hugetlb pages and can not be set here as only page_huge_active 867 + * pages can be migrated. 868 + */ 869 + if (page_private(page)) { 870 + set_page_private(newpage, page_private(page)); 871 + set_page_private(page, 0); 872 + } 873 + 862 874 if (mode != MIGRATE_SYNC_NO_COPY) 863 875 migrate_page_copy(newpage, page); 864 876 else
+22
lib/Kconfig.kasan
··· 113 113 114 114 endchoice 115 115 116 + config KASAN_STACK_ENABLE 117 + bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST 118 + default !(CLANG_VERSION < 90000) 119 + depends on KASAN 120 + help 121 + The LLVM stack address sanitizer has a known problem that 122 + causes excessive stack usage in a lot of functions, see 123 + https://bugs.llvm.org/show_bug.cgi?id=38809 124 + Disabling asan-stack makes it safe to run kernels built 125 + with clang-8 with KASAN enabled, though it loses some of 126 + the functionality. 127 + This feature is always disabled when compile-testing with clang-8 128 + or earlier to avoid cluttering the output with stack overflow 129 + warnings, but clang-8 users can still enable it for builds without 130 + CONFIG_COMPILE_TEST. On gcc and later clang versions it is 131 + assumed to always be safe to use and enabled by default. 132 + 133 + config KASAN_STACK 134 + int 135 + default 1 if KASAN_STACK_ENABLE || CC_IS_GCC 136 + default 0 137 + 116 138 config KASAN_S390_4_LEVEL_PAGING 117 139 bool "KASan: use 4-level paging" 118 140 depends on KASAN && S390
+13 -3
mm/hugetlb.c
··· 3624 3624 copy_user_huge_page(new_page, old_page, address, vma, 3625 3625 pages_per_huge_page(h)); 3626 3626 __SetPageUptodate(new_page); 3627 - set_page_huge_active(new_page); 3628 3627 3629 3628 mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h)); 3630 3629 mmu_notifier_invalidate_range_start(&range); ··· 3644 3645 make_huge_pte(vma, new_page, 1)); 3645 3646 page_remove_rmap(old_page, true); 3646 3647 hugepage_add_new_anon_rmap(new_page, vma, haddr); 3648 + set_page_huge_active(new_page); 3647 3649 /* Make the old page be freed below */ 3648 3650 new_page = old_page; 3649 3651 } ··· 3729 3729 pte_t new_pte; 3730 3730 spinlock_t *ptl; 3731 3731 unsigned long haddr = address & huge_page_mask(h); 3732 + bool new_page = false; 3732 3733 3733 3734 /* 3734 3735 * Currently, we are forced to kill the process in the event the ··· 3791 3790 } 3792 3791 clear_huge_page(page, address, pages_per_huge_page(h)); 3793 3792 __SetPageUptodate(page); 3794 - set_page_huge_active(page); 3793 + new_page = true; 3795 3794 3796 3795 if (vma->vm_flags & VM_MAYSHARE) { 3797 3796 int err = huge_add_to_page_cache(page, mapping, idx); ··· 3862 3861 } 3863 3862 3864 3863 spin_unlock(ptl); 3864 + 3865 + /* 3866 + * Only make newly allocated pages active. Existing pages found 3867 + * in the pagecache could be !page_huge_active() if they have been 3868 + * isolated for migration. 3869 + */ 3870 + if (new_page) 3871 + set_page_huge_active(page); 3872 + 3865 3873 unlock_page(page); 3866 3874 out: 3867 3875 return ret; ··· 4105 4095 * the set_pte_at() write. 4106 4096 */ 4107 4097 __SetPageUptodate(page); 4108 - set_page_huge_active(page); 4109 4098 4110 4099 mapping = dst_vma->vm_file->f_mapping; 4111 4100 idx = vma_hugecache_offset(h, dst_vma, dst_addr); ··· 4172 4163 update_mmu_cache(dst_vma, dst_addr, dst_pte); 4173 4164 4174 4165 spin_unlock(ptl); 4166 + set_page_huge_active(page); 4175 4167 if (vm_shared) 4176 4168 unlock_page(page); 4177 4169 ret = 0;
+11
mm/migrate.c
··· 1315 1315 lock_page(hpage); 1316 1316 } 1317 1317 1318 + /* 1319 + * Check for pages which are in the process of being freed. Without 1320 + * page_mapping() set, hugetlbfs specific move page routine will not 1321 + * be called and we could leak usage counts for subpools. 1322 + */ 1323 + if (page_private(hpage) && !page_mapping(hpage)) { 1324 + rc = -EBUSY; 1325 + goto out_unlock; 1326 + } 1327 + 1318 1328 if (PageAnon(hpage)) 1319 1329 anon_vma = page_get_anon_vma(hpage); 1320 1330 ··· 1355 1345 put_new_page = NULL; 1356 1346 } 1357 1347 1348 + out_unlock: 1358 1349 unlock_page(hpage); 1359 1350 out: 1360 1351 if (rc != -EAGAIN)
+1 -1
scripts/Makefile.kasan
··· 26 26 CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \ 27 27 $(call cc-param,asan-globals=1) \ 28 28 $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \ 29 - $(call cc-param,asan-stack=1) \ 29 + $(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \ 30 30 $(call cc-param,asan-use-after-scope=1) \ 31 31 $(call cc-param,asan-instrument-allocas=1) 32 32 endif