Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()

kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask and
always allocate memory using the hardcoded GFP_KERNEL flag. This makes
them inconsistent with vmalloc(), which was recently extended to support
GFP_NOFS and GFP_NOIO allocations.
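
For context, a minimal sketch of the kind of caller this matters for
(illustrative, not taken from the patch): since commit 451769ebb7e7, a
filesystem that must avoid reclaim recursion can do its large allocations
like so and expect the NOFS constraint to be honoured throughout:

	/* Illustrative GFP_NOFS user of vmalloc (not from this patch). */
	void *buf = __vmalloc(size, GFP_NOFS | __GFP_ZERO);
	if (!buf)
		return -ENOMEM;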

Page table allocations performed during shadow population also ignore the
external gfp_mask. To preserve the intended semantics of GFP_NOFS and
GFP_NOIO, wrap the apply_to_page_range() calls in the appropriate
memalloc scope.
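
The scope API works roughly as follows (a minimal sketch; see
include/linux/sched/mm.h): it sets a flag on the current task so that
every allocation inside the section, including page table allocations
done on the task's behalf, implicitly behaves as GFP_NOFS:

	unsigned int flags;

	flags = memalloc_nofs_save();	/* sets PF_MEMALLOC_NOFS on current */
	/* any allocation here implicitly has __GFP_FS masked out */
	memalloc_nofs_restore(flags);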

XFS calls vmalloc() with GFP_NOFS, so this bug could lead to a deadlock:
the hardcoded GFP_KERNEL allocations can enter filesystem reclaim while
XFS already holds locks, and reclaim may then block on those same locks.

There is a syzbot report:
https://lkml.kernel.org/r/686ea951.050a0220.385921.0016.GAE@google.com

This patch:
- Extends kasan_populate_vmalloc() and helpers to take gfp_mask;
- Passes gfp_mask down to alloc_pages_bulk() and __get_free_page();
- Enforces GFP_NOFS/NOIO semantics with memalloc_*_save()/restore()
  around apply_to_page_range() (the flag test is sketched after this list);
- Updates vmalloc.c and percpu allocator call sites accordingly.
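
The GFP_NOFS/GFP_NOIO distinction relies on how the standard gfp classes
encode __GFP_FS and __GFP_IO: GFP_NOFS keeps __GFP_IO but drops __GFP_FS,
and GFP_NOIO drops both. A small userspace sketch of that test follows;
the bit values are redefined locally for illustration and are assumptions,
not taken from the patch:

	#include <stdio.h>

	/* Simplified stand-ins for the kernel's gfp bits (assumed values). */
	#define __GFP_IO	0x40u
	#define __GFP_FS	0x80u
	#define __GFP_RECLAIM	0x400u
	#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
	#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
	#define GFP_NOIO	(__GFP_RECLAIM)

	/* Mirrors the patch's test: IO set but FS clear means NOFS scope. */
	static const char *gfp_scope(unsigned int gfp)
	{
		unsigned int bits = gfp & (__GFP_FS | __GFP_IO);

		if (bits == __GFP_IO)
			return "memalloc_nofs scope";
		if (bits == 0)
			return "memalloc_noio scope";
		return "no scope needed";
	}

	int main(void)
	{
		printf("GFP_KERNEL -> %s\n", gfp_scope(GFP_KERNEL));
		printf("GFP_NOFS   -> %s\n", gfp_scope(GFP_NOFS));
		printf("GFP_NOIO   -> %s\n", gfp_scope(GFP_NOIO));
		return 0;
	}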

Link: https://lkml.kernel.org/r/20250831121058.92971-1-urezki@gmail.com
Fixes: 451769ebb7e7 ("mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc")
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reported-by: syzbot+3470c9ffee63e4abafeb@syzkaller.appspotmail.com
Reviewed-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Uladzislau Rezki (Sony), committed by Andrew Morton
79357cd0 04100f77

3 files changed: +31 -14

include/linux/kasan.h: +3 -3

···
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

 void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
			    unsigned long free_region_start,
			    unsigned long free_region_end,
···
					       unsigned long size)
 { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-					unsigned long size)
+					unsigned long size, gfp_t gfp_mask)
 {
	return 0;
 }
···
 static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-					 unsigned long size)
+					 unsigned long size, gfp_t gfp_mask)
 {
	return 0;
 }

mm/kasan/shadow.c: +24 -7

···
	}
 }

-static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
 {
	unsigned long nr_populated, nr_total = nr_pages;
	struct page **page_array = pages;

	while (nr_pages) {
-		nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+		nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
		if (!nr_populated) {
			___free_pages_bulk(page_array, nr_total - nr_pages);
			return -ENOMEM;
···
	return 0;
 }

-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
 {
	unsigned long nr_pages, nr_total = PFN_UP(end - start);
	struct vmalloc_populate_data data;
+	unsigned int flags;
	int ret = 0;

-	data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
	if (!data.pages)
		return -ENOMEM;

	while (nr_total) {
		nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
-		ret = ___alloc_pages_bulk(data.pages, nr_pages);
+		ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
		if (ret)
			break;

		data.start = start;
+
+		/*
+		 * page tables allocations ignore external gfp mask, enforce it
+		 * by the scope API
+		 */
+		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+			flags = memalloc_nofs_save();
+		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+			flags = memalloc_noio_save();
+
		ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
					  kasan_populate_vmalloc_pte, &data);
+
+		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+			memalloc_nofs_restore(flags);
+		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+			memalloc_noio_restore(flags);
+
		___free_pages_bulk(data.pages, nr_pages);
		if (ret)
			break;
···
	return ret;
 }

-int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
 {
	unsigned long shadow_start, shadow_end;
	int ret;
···
	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
	shadow_end = PAGE_ALIGN(shadow_end);

-	ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
+	ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
	if (ret)
		return ret;
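
The conditional save/restore pairs above could equally be read as two
helpers; the following is a hypothetical refactoring, not part of the
patch, shown only to make the pairing explicit:

	/* Hypothetical helpers (not in the patch), same dispatch as above. */
	static unsigned int kasan_gfp_scope_save(gfp_t gfp_mask)
	{
		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
			return memalloc_nofs_save();	/* GFP_NOFS semantics */
		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
			return memalloc_noio_save();	/* GFP_NOIO semantics */
		return 0;				/* e.g. GFP_KERNEL */
	}

	static void kasan_gfp_scope_restore(gfp_t gfp_mask, unsigned int flags)
	{
		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
			memalloc_nofs_restore(flags);
		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
			memalloc_noio_restore(flags);
	}

Note that in the patch, flags is only written and later read on the
NOFS/NOIO paths, so the GFP_KERNEL path never touches it.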

mm/vmalloc.c: +4 -4

···
	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

+	/* Only reclaim behaviour flags are relevant. */
+	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
	might_sleep();

	/*
···
	 */
	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
	if (!va) {
-		gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
-
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
		if (unlikely(!va))
			return ERR_PTR(-ENOMEM);
···
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

-	ret = kasan_populate_vmalloc(addr, size);
+	ret = kasan_populate_vmalloc(addr, size, gfp_mask);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
···

	/* populate the kasan shadow space */
	for (area = 0; area < nr_vms; area++) {
-		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
+		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
			goto err_free_shadow;
	}