Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm: vmalloc: streamline vmalloc memory accounting

Use a vmstat counter instead of a custom, open-coded atomic. This has
the added benefit of making the data available per-node, and prepares
for cleaning up the memcg accounting as well.

Link: https://lkml.kernel.org/r/20260223160147.3792777-1-hannes@cmpxchg.org
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Johannes Weiner and committed by Andrew Morton
b9ec0ed9 6b0dd42d
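Beyond the per-node visibility the message mentions, a vmstat counter is also cheaper to maintain than the dedicated atomic it replaces: updates accumulate in a small per-CPU delta and only touch the shared per-node and global atomics once the delta crosses a threshold. Below is a rough userspace model of that batching scheme, not kernel code: the names are hypothetical and the fixed THRESHOLD stands in for the kernel's adaptive per-CPU stat_threshold.

#include <stdatomic.h>
#include <stdio.h>

#define NR_NODES  2
#define THRESHOLD 32	/* stand-in for the kernel's adaptive threshold */

static atomic_long node_stat[NR_NODES];  /* like pgdat->vm_stat[NR_VMALLOC] */
static atomic_long global_stat;          /* like vm_node_stat[NR_VMALLOC] */
static _Thread_local long cpu_delta[NR_NODES];  /* like the per-CPU diffs */

/* Roughly what mod_node_page_state() does: batch locally, fold on overflow. */
static void mod_node_stat(int node, long delta)
{
	cpu_delta[node] += delta;
	if (cpu_delta[node] > THRESHOLD || cpu_delta[node] < -THRESHOLD) {
		atomic_fetch_add(&node_stat[node], cpu_delta[node]);
		atomic_fetch_add(&global_stat, cpu_delta[node]);
		cpu_delta[node] = 0;
	}
}

int main(void)
{
	int i;

	/* Account 100 pages to node 0, then release 40 of them. */
	for (i = 0; i < 100; i++)
		mod_node_stat(0, 1);
	for (i = 0; i < 40; i++)
		mod_node_stat(0, -1);

	printf("node0=%ld global=%ld unfolded=%ld (true total: 60)\n",
	       atomic_load(&node_stat[0]), atomic_load(&global_stat),
	       cpu_delta[0]);
	return 0;
}

The lazy folding is also why readers of vmstat counters can momentarily see a value that lags the true page count by a few pages per CPU.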

5 files changed, +14 -13
fs/proc/meminfo.c (+2 -1)

···
 	show_val_kb(m, "Committed_AS:   ", committed);
 	seq_printf(m, "VmallocTotal:   %8lu kB\n",
 		   (unsigned long)VMALLOC_TOTAL >> 10);
-	show_val_kb(m, "VmallocUsed:    ", vmalloc_nr_pages());
+	show_val_kb(m, "VmallocUsed:    ",
+		    global_node_page_state(NR_VMALLOC));
 	show_val_kb(m, "VmallocChunk:   ", 0ul);
 	show_val_kb(m, "Percpu:         ", pcpu_nr_pages());
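With this hunk, VmallocUsed in /proc/meminfo is derived from the same NR_VMALLOC counter that the mm/vmstat.c hunk below exports as nr_vmalloc, rather than from the dedicated vmalloc_nr_pages() atomic the patch removes, so the two interfaces now report from a single source.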
include/linux/mmzone.h (+1)

···
 	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
 	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
 	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
+	NR_VMALLOC,
 	NR_KERNEL_STACK_KB,	/* measured in KiB */
 #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
 	NR_KERNEL_SCS_KB,	/* measured in KiB */
include/linux/vmalloc.h (-3)

···
 #ifdef CONFIG_MMU
 #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)

-unsigned long vmalloc_nr_pages(void);
-
 int vm_area_map_pages(struct vm_struct *area, unsigned long start,
 		      unsigned long end, struct page **pages);
 void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
···
 #else /* !CONFIG_MMU */
 #define VMALLOC_TOTAL 0UL

-static inline unsigned long vmalloc_nr_pages(void) { return 0; }
 static inline void set_vm_flush_reset_perms(void *addr) {}
 #endif /* CONFIG_MMU */
mm/vmalloc.c (+10 -9)

···
 static void drain_vmap_area_work(struct work_struct *work);
 static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

-static __cacheline_aligned_in_smp atomic_long_t nr_vmalloc_pages;
 static __cacheline_aligned_in_smp atomic_long_t vmap_lazy_nr;
-
-unsigned long vmalloc_nr_pages(void)
-{
-	return atomic_long_read(&nr_vmalloc_pages);
-}

 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
 {
···
 		 * High-order allocs for huge vmallocs are split, so
 		 * can be freed as an array of order-0 allocations
 		 */
+		if (!(vm->flags & VM_MAP_PUT_PAGES))
+			dec_node_page_state(page, NR_VMALLOC);
 		__free_page(page);
 		cond_resched();
 	}
-	if (!(vm->flags & VM_MAP_PUT_PAGES))
-		atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
 	kvfree(vm->pages);
 	kfree(vm);
 }
···
 			continue;
 		}

+		mod_node_page_state(page_pgdat(page), NR_VMALLOC, 1 << large_order);
+
 		split_page(page, large_order);
 		for (i = 0; i < (1U << large_order); i++)
 			pages[nr_allocated + i] = page + i;
···
 	if (!order) {
 		while (nr_allocated < nr_pages) {
 			unsigned int nr, nr_pages_request;
+			int i;

 			/*
 			 * A maximum allowed request is hard-coded and is 100
···
 			nr = alloc_pages_bulk_node_noprof(gfp, nid,
 						nr_pages_request,
 						pages + nr_allocated);
+
+			for (i = nr_allocated; i < nr_allocated + nr; i++)
+				inc_node_page_state(pages[i], NR_VMALLOC);

 			nr_allocated += nr;
···
 		if (unlikely(!page))
 			break;
+
+		mod_node_page_state(page_pgdat(page), NR_VMALLOC, 1 << order);

 		/*
 		 * High-order allocations must be able to be treated as
···
 		vmalloc_gfp_adjust(gfp_mask, page_order), node,
 		page_order, nr_small_pages, area->pages);

-	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
 	/* All pages of vm should be charged to same memcg, so use first one. */
 	if (gfp_mask & __GFP_ACCOUNT && area->nr_pages)
 		mod_memcg_page_state(area->pages[0], MEMCG_VMALLOC,
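Note the structural shift in the free path: the old code subtracted the whole area's page count from one global atomic after the loop, but a node counter has to be charged against the node each page actually lives on, so the accounting moves inside the loop as a per-page dec_node_page_state(), still skipped for VM_MAP_PUT_PAGES areas, whose pages vmalloc never allocated or accounted. The allocation side mirrors this in all three paths, bumping NR_VMALLOC via the page's pgdat in the huge-page path, the order-0 bulk path, and the single-page fallback loop, which is why the single atomic_long_add() at the end of the area setup can go away.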
mm/vmstat.c (+1)

···
 	[I(NR_KERNEL_MISC_RECLAIMABLE)] = "nr_kernel_misc_reclaimable",
 	[I(NR_FOLL_PIN_ACQUIRED)] = "nr_foll_pin_acquired",
 	[I(NR_FOLL_PIN_RELEASED)] = "nr_foll_pin_released",
+	[I(NR_VMALLOC)] = "nr_vmalloc",
 	[I(NR_KERNEL_STACK_KB)] = "nr_kernel_stack",
 #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
 	[I(NR_KERNEL_SCS_KB)] = "nr_shadow_call_stack",
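Once the item is wired into vmstat_text here, it surfaces as nr_vmalloc both globally in /proc/vmstat and per node in /sys/devices/system/node/nodeN/vmstat. A small sketch of reading it from userspace, assuming a kernel with this patch applied (the file formats are the standard vmstat interfaces); values are page counts, so scale by the page size:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Return the value of @name from a "name value" vmstat file, or -1. */
static long read_counter(const char *path, const char *name)
{
	char key[128];
	long val;
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	while (fscanf(f, "%127s %ld", key, &val) == 2) {
		if (strcmp(key, name) == 0) {
			fclose(f);
			return val;
		}
	}
	fclose(f);
	return -1;
}

int main(void)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	long global = read_counter("/proc/vmstat", "nr_vmalloc");
	long node0  = read_counter("/sys/devices/system/node/node0/vmstat",
				   "nr_vmalloc");

	if (global >= 0)
		printf("nr_vmalloc (all nodes): %ld kB\n", global * pgsz / 1024);
	if (node0 >= 0)
		printf("nr_vmalloc (node0):     %ld kB\n", node0 * pgsz / 1024);
	return 0;
}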