Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

mm: memcontrol: fix rcu unbalance in get_non_dying_memcg_end()

Currently, get_non_dying_memcg_start() and get_non_dying_memcg_end() both
evaluate cgroup_subsys_on_dfl(memory_cgrp_subsys) independently to
determine whether to acquire or release the RCU read lock.

However, the result of cgroup_subsys_on_dfl() can change dynamically at
runtime due to cgroup hierarchy rebinding (e.g., when the memory
controller is moved between cgroup v1 and v2 hierarchies). This can cause
the following warning:

=====================================
WARNING: bad unlock balance detected!
7.0.0-next-20260420+ #83 Tainted: G W
-------------------------------------
memcg-repro/270 is trying to release lock (rcu_read_lock) at:
[<ffffffff815f57f7>] rcu_read_unlock+0x17/0x60
but there are no more locks to release!

other info that might help us debug this:
1 lock held by memcg-repro/270:
#0: ffff888102fa2088 (vm_lock){++++}-{0:0}, at: do_user_addr_fault+0x285/0x880

stack backtrace:
CPU: 0 UID: 0 PID: 270 Comm: memcg-repro Tainted: G W 7.0.0-next-20260420+ #
Tainted: [W]=WARN
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
Call Trace:
<TASK>
? rcu_read_unlock+0x17/0x60
dump_stack_lvl+0x77/0xb0
print_unlock_imbalance_bug+0xe0/0xf0
? rcu_read_unlock+0x17/0x60
lock_release+0x21d/0x2a0
rcu_read_unlock+0x1c/0x60
do_pte_missing+0x233/0xb40
__handle_mm_fault+0x80e/0xcd0
handle_mm_fault+0x146/0x310
do_user_addr_fault+0x303/0x880
exc_page_fault+0x9b/0x270
asm_exc_page_fault+0x26/0x30
RIP: 0033:0x5590e4eb41ea
Code: 61 cc 66 0f 6f e0 66 0f 61 c2 66 0f db cd 66 0f 69 e2 66 0f 6f d0 66 0f 69 d4 66 0f 61 0
RSP: 002b:00007ffcad25f030 EFLAGS: 00010202
RAX: 00005590e4eb8010 RBX: 00007ffcad260f7d RCX: 00007f73c474d44d
RDX: 00005590e4eb80a0 RSI: 00005590e4eb503c RDI: 000000000000000f
RBP: 00005590e4eb70a0 R08: 0000000000000000 R09: 00007f73c483a680
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
R13: 00007ffcad25f180 R14: 00005590e4eb6dd8 R15: 00007f73c4869020
</TASK>
------------[ cut here ]------------

Fix this by explicitly tracking the RCU lock state, ensuring that
rcu_read_unlock() in get_non_dying_memcg_end() is strictly paired with the
lock acquisition, regardless of any runtime rebinding events.

Link: https://lore.kernel.org/20260429073105.44472-1-qi.zheng@linux.dev
Fixes: 8285917d6f38 ("mm: memcontrol: prepare for reparenting non-hierarchical stats")
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Qi Zheng and committed by
Andrew Morton
99ebc509 292411fd

+19 -10
+19 -10
mm/memcontrol.c
mm/memcontrol.c  (+19 −10)

@@ get_non_dying_memcg_start / get_non_dying_memcg_end @@
 /*
  * Used in mod_memcg_state() and mod_memcg_lruvec_state() to avoid race with
  * reparenting of non-hierarchical state_locals.
  */
-static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg,
+							   bool *rcu_locked)
 {
-	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+	/* Rebinding can cause this value to be changed at runtime */
+	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
+		*rcu_locked = false;
 		return memcg;
+	}
 
 	rcu_read_lock();
+	*rcu_locked = true;
 
 	while (memcg_is_dying(memcg))
 		memcg = parent_mem_cgroup(memcg);
@@
 	return memcg;
 }
 
-static inline void get_non_dying_memcg_end(void)
+static inline void get_non_dying_memcg_end(bool rcu_locked)
 {
-	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+	if (!rcu_locked)
 		return;
 
 	rcu_read_unlock();
 }
 #else
-static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg,
+							   bool *rcu_locked)
 {
 	return memcg;
 }
 
-static inline void get_non_dying_memcg_end(void)
+static inline void get_non_dying_memcg_end(bool rcu_locked)
 {
 }
 #endif
@@ mod_memcg_state @@
 void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
 		     int val)
 {
+	bool rcu_locked = false;
+
 	if (mem_cgroup_disabled())
 		return;
 
-	memcg = get_non_dying_memcg_start(memcg);
+	memcg = get_non_dying_memcg_start(memcg, &rcu_locked);
 	__mod_memcg_state(memcg, idx, val);
-	get_non_dying_memcg_end();
+	get_non_dying_memcg_end(rcu_locked);
 }
 
 #ifdef CONFIG_MEMCG_V1
@@ mod_memcg_lruvec_state (signature above this hunk, not shown in excerpt) @@
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 	struct mem_cgroup_per_node *pn;
 	struct mem_cgroup *memcg;
+	bool rcu_locked = false;
 
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	memcg = get_non_dying_memcg_start(pn->memcg);
+	memcg = get_non_dying_memcg_start(pn->memcg, &rcu_locked);
 	pn = memcg->nodeinfo[pgdat->node_id];
 
 	__mod_memcg_lruvec_state(pn, idx, val);
 
-	get_non_dying_memcg_end();
+	get_non_dying_memcg_end(rcu_locked);
 }
 
 /**