Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

memcg: remove __lruvec_stat_mod_folio

__lruvec_stat_mod_folio() is already safe against irqs, so there is no
need to have a separate interface (i.e. lruvec_stat_mod_folio) which
wraps calls to it with irq disabling and reenabling. Let's rename
__lruvec_stat_mod_folio() to lruvec_stat_mod_folio().

Link: https://lkml.kernel.org/r/20251110232008.1352063-5-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Shakeel Butt and committed by Andrew Morton
c1bd0999 5b3eb779
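
For context, this is the CONFIG_MEMCG wrapper that the patch removes from include/linux/vmstat.h (reproduced from the first hunk below). It disabled and re-enabled interrupts around a callee that already handles irq safety itself, so the save/restore was pure overhead:

        static inline void lruvec_stat_mod_folio(struct folio *folio,
                                                 enum node_stat_item idx, int val)
        {
                unsigned long flags;

                local_irq_save(flags);
                /* callee is already irq-safe; this wrapping is redundant */
                __lruvec_stat_mod_folio(folio, idx, val);
                local_irq_restore(flags);
        }

After the rename, callers invoke lruvec_stat_mod_folio() directly, as the per-file diffs below show.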

+25 -53
+1 -29
include/linux/vmstat.h
···
 void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                       int val);
 
-void __lruvec_stat_mod_folio(struct folio *folio,
+void lruvec_stat_mod_folio(struct folio *folio,
                             enum node_stat_item idx, int val);
-
-static inline void lruvec_stat_mod_folio(struct folio *folio,
-                                        enum node_stat_item idx, int val)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __lruvec_stat_mod_folio(folio, idx, val);
-       local_irq_restore(flags);
-}
 
 static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
···
        mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
 }
 
-static inline void __lruvec_stat_mod_folio(struct folio *folio,
-                                          enum node_stat_item idx, int val)
-{
-       mod_node_page_state(folio_pgdat(folio), idx, val);
-}
-
 static inline void lruvec_stat_mod_folio(struct folio *folio,
                                         enum node_stat_item idx, int val)
 {
···
 }
 
 #endif /* CONFIG_MEMCG */
-
-static inline void __lruvec_stat_add_folio(struct folio *folio,
-                                          enum node_stat_item idx)
-{
-       __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
-}
-
-static inline void __lruvec_stat_sub_folio(struct folio *folio,
-                                          enum node_stat_item idx)
-{
-       __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
-}
 
 static inline void lruvec_stat_add_folio(struct folio *folio,
                                         enum node_stat_item idx)
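
With the rename in place, the un-prefixed add/sub helpers that survive at the bottom of vmstat.h already expand to the irq-safe implementation, which is why the __-prefixed pair can be deleted outright. Roughly what remains (a sketch inferred from the diff context, not the verbatim file):

        static inline void lruvec_stat_add_folio(struct folio *folio,
                                                 enum node_stat_item idx)
        {
                lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
        }

        static inline void lruvec_stat_sub_folio(struct folio *folio,
                                                 enum node_stat_item idx)
        {
                lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
        }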
+10 -10
mm/filemap.c
···
 
        nr = folio_nr_pages(folio);
 
-       __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+       lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        if (folio_test_swapbacked(folio)) {
-               __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+               lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
                if (folio_test_pmd_mappable(folio))
-                       __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
+                       lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
        } else if (folio_test_pmd_mappable(folio)) {
-               __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
+               lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
                filemap_nr_thps_dec(mapping);
        }
        if (test_bit(AS_KERNEL_FILE, &folio->mapping->flags))
···
        old->mapping = NULL;
        /* hugetlb pages do not participate in page cache accounting. */
        if (!folio_test_hugetlb(old))
-               __lruvec_stat_sub_folio(old, NR_FILE_PAGES);
+               lruvec_stat_sub_folio(old, NR_FILE_PAGES);
        if (!folio_test_hugetlb(new))
-               __lruvec_stat_add_folio(new, NR_FILE_PAGES);
+               lruvec_stat_add_folio(new, NR_FILE_PAGES);
        if (folio_test_swapbacked(old))
-               __lruvec_stat_sub_folio(old, NR_SHMEM);
+               lruvec_stat_sub_folio(old, NR_SHMEM);
        if (folio_test_swapbacked(new))
-               __lruvec_stat_add_folio(new, NR_SHMEM);
+               lruvec_stat_add_folio(new, NR_SHMEM);
        xas_unlock_irq(&xas);
        if (free_folio)
                free_folio(old);
···
 
        /* hugetlb pages do not participate in page cache accounting */
        if (!huge) {
-               __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+               lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
                if (folio_test_pmd_mappable(folio))
-                       __lruvec_stat_mod_folio(folio,
+                       lruvec_stat_mod_folio(folio,
                                        NR_FILE_THPS, nr);
        }
 
+2 -2
mm/huge_memory.c
···
        if (folio_test_pmd_mappable(folio) &&
            new_order < HPAGE_PMD_ORDER) {
                if (folio_test_swapbacked(folio)) {
-                       __lruvec_stat_mod_folio(folio,
+                       lruvec_stat_mod_folio(folio,
                                        NR_SHMEM_THPS, -nr);
                } else {
-                       __lruvec_stat_mod_folio(folio,
+                       lruvec_stat_mod_folio(folio,
                                        NR_FILE_THPS, -nr);
                        filemap_nr_thps_dec(mapping);
                }
+4 -4
mm/khugepaged.c
···
        }
 
        if (is_shmem)
-               __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
+               lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
        else
-               __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
+               lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
 
        if (nr_none) {
-               __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
+               lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
                /* nr_none is always 0 for non-shmem. */
-               __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
+               lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
        }
 
        /*
+2 -2
mm/memcontrol.c
···
        mod_memcg_lruvec_state(lruvec, idx, val);
 }
 
-void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
+void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
                             int val)
 {
        struct mem_cgroup *memcg;
···
        mod_lruvec_state(lruvec, idx, val);
        rcu_read_unlock();
 }
-EXPORT_SYMBOL(__lruvec_stat_mod_folio);
+EXPORT_SYMBOL(lruvec_stat_mod_folio);
 
 void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 {
+1 -1
mm/page-writeback.c
···
                inode_attach_wb(inode, folio);
                wb = inode_to_wb(inode);
 
-               __lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
+               lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
                __zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
                __node_stat_mod_folio(folio, NR_DIRTIED, nr);
                wb_stat_mod(wb, WB_RECLAIMABLE, nr);
+2 -2
mm/rmap.c
···
 
        if (nr) {
                idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
-               __lruvec_stat_mod_folio(folio, idx, nr);
+               lruvec_stat_mod_folio(folio, idx, nr);
        }
        if (nr_pmdmapped) {
                if (folio_test_anon(folio)) {
                        idx = NR_ANON_THPS;
-                       __lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
+                       lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
                } else {
                        /* NR_*_PMDMAPPED are not maintained per-memcg */
                        idx = folio_test_swapbacked(folio) ?
+3 -3
mm/shmem.c
···
 static void shmem_update_stats(struct folio *folio, int nr_pages)
 {
        if (folio_test_pmd_mappable(folio))
-               __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
-       __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
-       __lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
+               lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
+       lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+       lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
 }
 
 /*