Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* memcontrol.c - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 *
10 * Memory thresholds
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
13 *
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
17 *
18 * Native page reclaim
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23 *
24 * Per memcg lru locking
25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26 */
27
28#include <linux/cgroup-defs.h>
29#include <linux/page_counter.h>
30#include <linux/memcontrol.h>
31#include <linux/cgroup.h>
32#include <linux/cpuset.h>
33#include <linux/sched/mm.h>
34#include <linux/shmem_fs.h>
35#include <linux/hugetlb.h>
36#include <linux/pagemap.h>
37#include <linux/folio_batch.h>
38#include <linux/vm_event_item.h>
39#include <linux/smp.h>
40#include <linux/page-flags.h>
41#include <linux/backing-dev.h>
42#include <linux/bit_spinlock.h>
43#include <linux/rcupdate.h>
44#include <linux/limits.h>
45#include <linux/export.h>
46#include <linux/list.h>
47#include <linux/mutex.h>
48#include <linux/rbtree.h>
49#include <linux/slab.h>
50#include <linux/swapops.h>
51#include <linux/spinlock.h>
52#include <linux/fs.h>
53#include <linux/seq_file.h>
54#include <linux/vmpressure.h>
55#include <linux/memremap.h>
56#include <linux/mm_inline.h>
57#include <linux/swap_cgroup.h>
58#include <linux/cpu.h>
59#include <linux/oom.h>
60#include <linux/lockdep.h>
61#include <linux/resume_user_mode.h>
62#include <linux/psi.h>
63#include <linux/seq_buf.h>
64#include <linux/sched/isolation.h>
65#include <linux/kmemleak.h>
66#include "internal.h"
67#include <net/sock.h>
68#include <net/ip.h>
69#include "slab.h"
70#include "memcontrol-v1.h"
71
72#include <linux/uaccess.h>
73
74#define CREATE_TRACE_POINTS
75#include <trace/events/memcg.h>
76#undef CREATE_TRACE_POINTS
77
78#include <trace/events/vmscan.h>
79
80struct cgroup_subsys memory_cgrp_subsys __read_mostly;
81EXPORT_SYMBOL(memory_cgrp_subsys);
82
83struct mem_cgroup *root_mem_cgroup __read_mostly;
84EXPORT_SYMBOL(root_mem_cgroup);
85
86/* Active memory cgroup to use from an interrupt context */
87DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
88EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
89
90/* Socket memory accounting disabled? */
91static bool cgroup_memory_nosocket __ro_after_init;
92
93/* Kernel memory accounting disabled? */
94static bool cgroup_memory_nokmem __ro_after_init;
95
96/* BPF memory accounting disabled? */
97static bool cgroup_memory_nobpf __ro_after_init;
98
99static struct workqueue_struct *memcg_wq __ro_after_init;
100
101static struct kmem_cache *memcg_cachep;
102static struct kmem_cache *memcg_pn_cachep;
103
104#ifdef CONFIG_CGROUP_WRITEBACK
105static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
106#endif
107
108static inline bool task_is_dying(void)
109{
110 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
111 (current->flags & PF_EXITING);
112}
113
114/* Some nice accessors for the vmpressure. */
115struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
116{
117 if (!memcg)
118 memcg = root_mem_cgroup;
119 return &memcg->vmpressure;
120}
121
122struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
123{
124 return container_of(vmpr, struct mem_cgroup, vmpressure);
125}
126
127#define SEQ_BUF_SIZE SZ_4K
128#define CURRENT_OBJCG_UPDATE_BIT 0
129#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
130
131static DEFINE_SPINLOCK(objcg_lock);
132
133bool mem_cgroup_kmem_disabled(void)
134{
135 return cgroup_memory_nokmem;
136}
137
138static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
139
140static void obj_cgroup_release(struct percpu_ref *ref)
141{
142 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
143 unsigned int nr_bytes;
144 unsigned int nr_pages;
145 unsigned long flags;
146
	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *    PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *    the stock is flushed,
	 *    objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *    92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *    92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
167 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
168 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
169 nr_pages = nr_bytes >> PAGE_SHIFT;
170
171 if (nr_pages) {
172 struct mem_cgroup *memcg;
173
174 memcg = get_mem_cgroup_from_objcg(objcg);
175 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
176 memcg1_account_kmem(memcg, -nr_pages);
177 if (!mem_cgroup_is_root(memcg))
178 memcg_uncharge(memcg, nr_pages);
179 mem_cgroup_put(memcg);
180 }
181
182 spin_lock_irqsave(&objcg_lock, flags);
183 list_del(&objcg->list);
184 spin_unlock_irqrestore(&objcg_lock, flags);
185
186 percpu_ref_exit(ref);
187 kfree_rcu(objcg, rcu);
188}
189
190static struct obj_cgroup *obj_cgroup_alloc(void)
191{
192 struct obj_cgroup *objcg;
193 int ret;
194
195 objcg = kzalloc_obj(struct obj_cgroup);
196 if (!objcg)
197 return NULL;
198
199 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
200 GFP_KERNEL);
201 if (ret) {
202 kfree(objcg);
203 return NULL;
204 }
205 INIT_LIST_HEAD(&objcg->list);
206 return objcg;
207}
208
209static inline struct obj_cgroup *__memcg_reparent_objcgs(struct mem_cgroup *memcg,
210 struct mem_cgroup *parent,
211 int nid)
212{
213 struct obj_cgroup *objcg, *iter;
214 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
215 struct mem_cgroup_per_node *parent_pn = parent->nodeinfo[nid];
216
217 objcg = rcu_replace_pointer(pn->objcg, NULL, true);
218 /* 1) Ready to reparent active objcg. */
219 list_add(&objcg->list, &pn->objcg_list);
220 /* 2) Reparent active objcg and already reparented objcgs to parent. */
221 list_for_each_entry(iter, &pn->objcg_list, list)
222 WRITE_ONCE(iter->memcg, parent);
223 /* 3) Move already reparented objcgs to the parent's list */
224 list_splice(&pn->objcg_list, &parent_pn->objcg_list);
225
226 return objcg;
227}
228
229#ifdef CONFIG_MEMCG_V1
230static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force);
231
232static inline void reparent_state_local(struct mem_cgroup *memcg, struct mem_cgroup *parent)
233{
234 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
235 return;
236
	/*
	 * Reparent stats exposed non-hierarchically. Flush @memcg's stats first
	 * to read its stats accurately, and conservatively flush @parent's
	 * stats after reparenting to avoid hiding a potentially large stat
	 * update (e.g. from callers of mem_cgroup_flush_stats_ratelimited()).
	 */
243 __mem_cgroup_flush_stats(memcg, true);
244
245 /* The following counts are all non-hierarchical and need to be reparented. */
246 reparent_memcg1_state_local(memcg, parent);
247 reparent_memcg1_lruvec_state_local(memcg, parent);
248
249 __mem_cgroup_flush_stats(parent, true);
250}
251#else
252static inline void reparent_state_local(struct mem_cgroup *memcg, struct mem_cgroup *parent)
253{
254}
255#endif
256
257static inline void reparent_locks(struct mem_cgroup *memcg, struct mem_cgroup *parent, int nid)
258{
259 spin_lock_irq(&objcg_lock);
260 spin_lock_nested(&mem_cgroup_lruvec(memcg, NODE_DATA(nid))->lru_lock, 1);
261 spin_lock_nested(&mem_cgroup_lruvec(parent, NODE_DATA(nid))->lru_lock, 2);
262}
263
264static inline void reparent_unlocks(struct mem_cgroup *memcg, struct mem_cgroup *parent, int nid)
265{
266 spin_unlock(&mem_cgroup_lruvec(parent, NODE_DATA(nid))->lru_lock);
267 spin_unlock(&mem_cgroup_lruvec(memcg, NODE_DATA(nid))->lru_lock);
268 spin_unlock_irq(&objcg_lock);
269}
270
271static void memcg_reparent_objcgs(struct mem_cgroup *memcg)
272{
273 struct obj_cgroup *objcg;
274 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
275 int nid;
276
277 for_each_node(nid) {
278retry:
279 if (lru_gen_enabled())
280 max_lru_gen_memcg(parent, nid);
281
282 reparent_locks(memcg, parent, nid);
283
284 if (lru_gen_enabled()) {
285 if (!recheck_lru_gen_max_memcg(parent, nid)) {
286 reparent_unlocks(memcg, parent, nid);
287 cond_resched();
288 goto retry;
289 }
290 lru_gen_reparent_memcg(memcg, parent, nid);
291 } else {
292 lru_reparent_memcg(memcg, parent, nid);
293 }
294
295 objcg = __memcg_reparent_objcgs(memcg, parent, nid);
296
297 reparent_unlocks(memcg, parent, nid);
298
299 percpu_ref_kill(&objcg->refcnt);
300 }
301
302 reparent_state_local(memcg, parent);
303}
304
/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
 * conditional on this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
311DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
312EXPORT_SYMBOL(memcg_kmem_online_key);
313
314DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
315EXPORT_SYMBOL(memcg_bpf_enabled_key);
316
317/**
318 * get_mem_cgroup_css_from_folio - acquire a css of the memcg associated with a folio
319 * @folio: folio of interest
320 *
321 * If memcg is bound to the default hierarchy, css of the memcg associated
322 * with @folio is returned. The returned css remains associated with @folio
323 * until it is released.
324 *
325 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
326 * is returned.
327 */
328struct cgroup_subsys_state *get_mem_cgroup_css_from_folio(struct folio *folio)
329{
330 struct mem_cgroup *memcg;
331
332 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
333 return &root_mem_cgroup->css;
334
335 memcg = get_mem_cgroup_from_folio(folio);
336
337 return memcg ? &memcg->css : &root_mem_cgroup->css;
338}
339
/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
353ino_t page_cgroup_ino(struct page *page)
354{
355 struct mem_cgroup *memcg;
356 unsigned long ino = 0;
357
358 rcu_read_lock();
359 /* page_folio() is racy here, but the entire function is racy anyway */
360 memcg = folio_memcg_check(page_folio(page));
361
362 while (memcg && !css_is_online(&memcg->css))
363 memcg = parent_mem_cgroup(memcg);
364 if (memcg)
365 ino = cgroup_ino(memcg->css.cgroup);
366 rcu_read_unlock();
367 return ino;
368}
369EXPORT_SYMBOL_GPL(page_cgroup_ino);
370
371/* Subset of node_stat_item for memcg stats */
372static const unsigned int memcg_node_stat_items[] = {
373 NR_INACTIVE_ANON,
374 NR_ACTIVE_ANON,
375 NR_INACTIVE_FILE,
376 NR_ACTIVE_FILE,
377 NR_UNEVICTABLE,
378 NR_SLAB_RECLAIMABLE_B,
379 NR_SLAB_UNRECLAIMABLE_B,
380 WORKINGSET_REFAULT_ANON,
381 WORKINGSET_REFAULT_FILE,
382 WORKINGSET_ACTIVATE_ANON,
383 WORKINGSET_ACTIVATE_FILE,
384 WORKINGSET_RESTORE_ANON,
385 WORKINGSET_RESTORE_FILE,
386 WORKINGSET_NODERECLAIM,
387 NR_ANON_MAPPED,
388 NR_FILE_MAPPED,
389 NR_FILE_PAGES,
390 NR_FILE_DIRTY,
391 NR_WRITEBACK,
392 NR_SHMEM,
393 NR_SHMEM_THPS,
394 NR_FILE_THPS,
395 NR_ANON_THPS,
396 NR_VMALLOC,
397 NR_KERNEL_STACK_KB,
398 NR_PAGETABLE,
399 NR_SECONDARY_PAGETABLE,
400#ifdef CONFIG_SWAP
401 NR_SWAPCACHE,
402#endif
403#ifdef CONFIG_NUMA_BALANCING
404 PGPROMOTE_SUCCESS,
405#endif
406 PGDEMOTE_KSWAPD,
407 PGDEMOTE_DIRECT,
408 PGDEMOTE_KHUGEPAGED,
409 PGDEMOTE_PROACTIVE,
410 PGSTEAL_KSWAPD,
411 PGSTEAL_DIRECT,
412 PGSTEAL_KHUGEPAGED,
413 PGSTEAL_PROACTIVE,
414 PGSTEAL_ANON,
415 PGSTEAL_FILE,
416 PGSCAN_KSWAPD,
417 PGSCAN_DIRECT,
418 PGSCAN_KHUGEPAGED,
419 PGSCAN_PROACTIVE,
420 PGSCAN_ANON,
421 PGSCAN_FILE,
422 PGREFILL,
423#ifdef CONFIG_HUGETLB_PAGE
424 NR_HUGETLB,
425#endif
426};
427
428static const unsigned int memcg_stat_items[] = {
429 MEMCG_SWAP,
430 MEMCG_SOCK,
431 MEMCG_PERCPU_B,
432 MEMCG_KMEM,
433 MEMCG_ZSWAP_B,
434 MEMCG_ZSWAPPED,
435 MEMCG_ZSWAP_INCOMP,
436};
437
438#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
439#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
440 ARRAY_SIZE(memcg_stat_items))
441#define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
442static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
443
444static void init_memcg_stats(void)
445{
446 u8 i, j = 0;
447
448 BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
449
450 memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
451
452 for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
453 mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
454
455 for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
456 mem_cgroup_stats_index[memcg_stat_items[i]] = j;
457}
458
459static inline int memcg_stats_index(int idx)
460{
461 return mem_cgroup_stats_index[idx];
462}
463
464struct lruvec_stats_percpu {
465 /* Local (CPU and cgroup) state */
466 long state[NR_MEMCG_NODE_STAT_ITEMS];
467
468 /* Delta calculation for lockless upward propagation */
469 long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
470};
471
472struct lruvec_stats {
473 /* Aggregated (CPU and subtree) state */
474 long state[NR_MEMCG_NODE_STAT_ITEMS];
475
476 /* Non-hierarchical (CPU aggregated) state */
477 long state_local[NR_MEMCG_NODE_STAT_ITEMS];
478
479 /* Pending child counts during tree propagation */
480 long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
481};
482
483unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
484{
485 struct mem_cgroup_per_node *pn;
486 long x;
487 int i;
488
489 if (mem_cgroup_disabled())
490 return node_page_state(lruvec_pgdat(lruvec), idx);
491
492 i = memcg_stats_index(idx);
493 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
494 return 0;
495
496 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
497 x = READ_ONCE(pn->lruvec_stats->state[i]);
498#ifdef CONFIG_SMP
499 if (x < 0)
500 x = 0;
501#endif
502 return x;
503}
504
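/*
 * Illustrative sketch (not part of the upstream file): how a caller might read
 * a per-node, per-memcg counter through the lruvec interface. The memcg and
 * node id are assumed to be supplied by the caller.
 */
static unsigned long __maybe_unused example_memcg_node_file_pages(struct mem_cgroup *memcg,
								  int nid)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));

	/* Hierarchical (subtree) value of NR_FILE_PAGES on this node */
	return lruvec_page_state(lruvec, NR_FILE_PAGES);
}
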
505unsigned long lruvec_page_state_local(struct lruvec *lruvec,
506 enum node_stat_item idx)
507{
508 struct mem_cgroup_per_node *pn;
509 long x;
510 int i;
511
512 if (mem_cgroup_disabled())
513 return node_page_state(lruvec_pgdat(lruvec), idx);
514
515 i = memcg_stats_index(idx);
516 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
517 return 0;
518
519 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
520 x = READ_ONCE(pn->lruvec_stats->state_local[i]);
521#ifdef CONFIG_SMP
522 if (x < 0)
523 x = 0;
524#endif
525 return x;
526}
527
528#ifdef CONFIG_MEMCG_V1
529static void __mod_memcg_lruvec_state(struct mem_cgroup_per_node *pn,
530 enum node_stat_item idx, long val);
531
532void reparent_memcg_lruvec_state_local(struct mem_cgroup *memcg,
533 struct mem_cgroup *parent, int idx)
534{
535 int nid;
536
537 for_each_node(nid) {
538 struct lruvec *child_lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
539 struct lruvec *parent_lruvec = mem_cgroup_lruvec(parent, NODE_DATA(nid));
540 unsigned long value = lruvec_page_state_local(child_lruvec, idx);
541 struct mem_cgroup_per_node *child_pn, *parent_pn;
542
543 child_pn = container_of(child_lruvec, struct mem_cgroup_per_node, lruvec);
544 parent_pn = container_of(parent_lruvec, struct mem_cgroup_per_node, lruvec);
545
546 __mod_memcg_lruvec_state(child_pn, idx, -value);
547 __mod_memcg_lruvec_state(parent_pn, idx, value);
548 }
549}
550#endif
551
552/* Subset of vm_event_item to report for memcg event stats */
553static const unsigned int memcg_vm_event_stat[] = {
554#ifdef CONFIG_MEMCG_V1
555 PGPGIN,
556 PGPGOUT,
557#endif
558 PSWPIN,
559 PSWPOUT,
560 PGFAULT,
561 PGMAJFAULT,
562 PGACTIVATE,
563 PGDEACTIVATE,
564 PGLAZYFREE,
565 PGLAZYFREED,
566#ifdef CONFIG_SWAP
567 SWPIN_ZERO,
568 SWPOUT_ZERO,
569#endif
570#ifdef CONFIG_ZSWAP
571 ZSWPIN,
572 ZSWPOUT,
573 ZSWPWB,
574#endif
575#ifdef CONFIG_TRANSPARENT_HUGEPAGE
576 THP_FAULT_ALLOC,
577 THP_COLLAPSE_ALLOC,
578 THP_SWPOUT,
579 THP_SWPOUT_FALLBACK,
580#endif
581#ifdef CONFIG_NUMA_BALANCING
582 NUMA_PAGE_MIGRATE,
583 NUMA_PTE_UPDATES,
584 NUMA_HINT_FAULTS,
585#endif
586};
587
588#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
589static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
590
591static void init_memcg_events(void)
592{
593 u8 i;
594
595 BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
596
597 memset(mem_cgroup_events_index, U8_MAX,
598 sizeof(mem_cgroup_events_index));
599
600 for (i = 0; i < NR_MEMCG_EVENTS; ++i)
601 mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
602}
603
604static inline int memcg_events_index(enum vm_event_item idx)
605{
606 return mem_cgroup_events_index[idx];
607}
608
609struct memcg_vmstats_percpu {
610 /* Stats updates since the last flush */
611 unsigned long stats_updates;
612
613 /* Cached pointers for fast iteration in memcg_rstat_updated() */
614 struct memcg_vmstats_percpu __percpu *parent_pcpu;
615 struct memcg_vmstats *vmstats;
616
617 /* The above should fit a single cacheline for memcg_rstat_updated() */
618
619 /* Local (CPU and cgroup) page state & events */
620 long state[MEMCG_VMSTAT_SIZE];
621 unsigned long events[NR_MEMCG_EVENTS];
622
623 /* Delta calculation for lockless upward propagation */
624 long state_prev[MEMCG_VMSTAT_SIZE];
625 unsigned long events_prev[NR_MEMCG_EVENTS];
626} ____cacheline_aligned;
627
628struct memcg_vmstats {
629 /* Aggregated (CPU and subtree) page state & events */
630 long state[MEMCG_VMSTAT_SIZE];
631 unsigned long events[NR_MEMCG_EVENTS];
632
633 /* Non-hierarchical (CPU aggregated) page state & events */
634 long state_local[MEMCG_VMSTAT_SIZE];
635 unsigned long events_local[NR_MEMCG_EVENTS];
636
637 /* Pending child counts during tree propagation */
638 long state_pending[MEMCG_VMSTAT_SIZE];
639 unsigned long events_pending[NR_MEMCG_EVENTS];
640
641 /* Stats updates since the last flush */
642 atomic_long_t stats_updates;
643};
644
/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats updates or reads are performance sensitive,
 * and adding stats flushing in such codepaths is not desirable. So, to
 * optimize flushing, the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds so that
 *    the rstat update tree does not grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are more
 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can
 *    leave the stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus)
 *    updates, but only for 2 seconds due to (1).
 */
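/*
 * Worked example (illustrative, exact numbers depend on the configuration):
 * with the default MEMCG_CHARGE_BATCH of 64 and a 32-CPU machine, a reader
 * only forces a synchronous flush once more than 64 * 32 = 2048 page-sized
 * updates have accumulated since the last flush; smaller deltas are left to
 * the periodic 2-second flusher.
 */
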
660static void flush_memcg_stats_dwork(struct work_struct *w);
661static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
662static u64 flush_last_time;
663
664#define FLUSH_TIME (2UL*HZ)
665
666static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
667{
668 return atomic_long_read(&vmstats->stats_updates) >
669 MEMCG_CHARGE_BATCH * num_online_cpus();
670}
671
672static inline void memcg_rstat_updated(struct mem_cgroup *memcg, long val,
673 int cpu)
674{
675 struct memcg_vmstats_percpu __percpu *statc_pcpu;
676 struct memcg_vmstats_percpu *statc;
677 unsigned long stats_updates;
678
679 if (!val)
680 return;
681
682 css_rstat_updated(&memcg->css, cpu);
683 statc_pcpu = memcg->vmstats_percpu;
684 for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
685 statc = this_cpu_ptr(statc_pcpu);
686 /*
687 * If @memcg is already flushable then all its ancestors are
688 * flushable as well and also there is no need to increase
689 * stats_updates.
690 */
691 if (memcg_vmstats_needs_flush(statc->vmstats))
692 break;
693
694 stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
695 abs(val));
696 if (stats_updates < MEMCG_CHARGE_BATCH)
697 continue;
698
699 stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
700 atomic_long_add(stats_updates, &statc->vmstats->stats_updates);
701 }
702}
703
704static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
705{
706 bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
707
708 trace_memcg_flush_stats(memcg, atomic_long_read(&memcg->vmstats->stats_updates),
709 force, needs_flush);
710
711 if (!force && !needs_flush)
712 return;
713
714 if (mem_cgroup_is_root(memcg))
715 WRITE_ONCE(flush_last_time, jiffies_64);
716
717 css_rstat_flush(&memcg->css);
718}
719
720/*
721 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
722 * @memcg: root of the subtree to flush
723 *
724 * Flushing is serialized by the underlying global rstat lock. There is also a
725 * minimum amount of work to be done even if there are no stat updates to flush.
726 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
727 * avoids unnecessary work and contention on the underlying lock.
728 */
729void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
730{
731 if (mem_cgroup_disabled())
732 return;
733
734 if (!memcg)
735 memcg = root_mem_cgroup;
736
737 __mem_cgroup_flush_stats(memcg, false);
738}
739
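/*
 * Illustrative sketch (not part of the upstream file): readers normally flush
 * the subtree first and then read the aggregated counters, as memory.stat does
 * below in memcg_stat_format().
 */
static unsigned long __maybe_unused example_read_anon_bytes(struct mem_cgroup *memcg)
{
	mem_cgroup_flush_stats(memcg);

	/* NR_ANON_MAPPED is page-based, so convert to bytes for reporting */
	return memcg_page_state(memcg, NR_ANON_MAPPED) * PAGE_SIZE;
}
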
740void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
741{
742 /* Only flush if the periodic flusher is one full cycle late */
743 if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
744 mem_cgroup_flush_stats(memcg);
745}
746
747static void flush_memcg_stats_dwork(struct work_struct *w)
748{
749 /*
750 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
751 * in latency-sensitive paths is as cheap as possible.
752 */
753 __mem_cgroup_flush_stats(root_mem_cgroup, true);
754 queue_delayed_work(system_dfl_wq, &stats_flush_dwork, FLUSH_TIME);
755}
756
757unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
758{
759 long x;
760 int i = memcg_stats_index(idx);
761
762 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
763 return 0;
764
765 x = READ_ONCE(memcg->vmstats->state[i]);
766#ifdef CONFIG_SMP
767 if (x < 0)
768 x = 0;
769#endif
770 return x;
771}
772
773bool memcg_stat_item_valid(int idx)
774{
775 if ((u32)idx >= MEMCG_NR_STAT)
776 return false;
777
778 return !BAD_STAT_IDX(memcg_stats_index(idx));
779}
780
781static int memcg_page_state_unit(int item);
782
783/*
784 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
785 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
786 */
787static long memcg_state_val_in_pages(int idx, long val)
788{
789 int unit = memcg_page_state_unit(idx);
790 long res;
791
792 if (!val || unit == PAGE_SIZE)
793 return val;
794
795 /* Get the absolute value of (val * unit / PAGE_SIZE). */
796 res = mult_frac(abs(val), unit, PAGE_SIZE);
797 /* Round up zero values. */
798 res = res ? : 1;
799
800 return val < 0 ? -res : res;
801}
802
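/*
 * Worked example (illustrative): NR_KERNEL_STACK_KB has a unit of SZ_1K, so a
 * delta of 16 (16kB of kernel stack) with a 4kB PAGE_SIZE is normalized to
 * mult_frac(16, 1024, 4096) = 4 pages. A 512-byte update to the byte-based
 * NR_SLAB_UNRECLAIMABLE_B (unit 1) truncates to 0 and is rounded up to 1 page,
 * so it still counts towards the flush threshold.
 */
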
803#ifdef CONFIG_MEMCG_V1
/*
 * Used in mod_memcg_state() and mod_memcg_lruvec_state() to avoid a race with
 * reparenting of the non-hierarchical state_local counters.
 */
808static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg,
809 bool *rcu_locked)
810{
811 /* Rebinding can cause this value to be changed at runtime */
812 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
813 *rcu_locked = false;
814 return memcg;
815 }
816
817 rcu_read_lock();
818 *rcu_locked = true;
819
820 while (memcg_is_dying(memcg))
821 memcg = parent_mem_cgroup(memcg);
822
823 return memcg;
824}
825
826static inline void get_non_dying_memcg_end(bool rcu_locked)
827{
828 if (!rcu_locked)
829 return;
830
831 rcu_read_unlock();
832}
833#else
834static inline struct mem_cgroup *get_non_dying_memcg_start(struct mem_cgroup *memcg,
835 bool *rcu_locked)
836{
837 return memcg;
838}
839
840static inline void get_non_dying_memcg_end(bool rcu_locked)
841{
842}
843#endif
844
845static void __mod_memcg_state(struct mem_cgroup *memcg,
846 enum memcg_stat_item idx, long val)
847{
848 int i = memcg_stats_index(idx);
849 int cpu;
850
851 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
852 return;
853
854 cpu = get_cpu();
855
856 this_cpu_add(memcg->vmstats_percpu->state[i], val);
857 val = memcg_state_val_in_pages(idx, val);
858 memcg_rstat_updated(memcg, val, cpu);
859
860 trace_mod_memcg_state(memcg, idx, val);
861
862 put_cpu();
863}
864
865/**
866 * mod_memcg_state - update cgroup memory statistics
867 * @memcg: the memory cgroup
868 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
869 * @val: delta to add to the counter, can be negative
870 */
871void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
872 int val)
873{
874 bool rcu_locked = false;
875
876 if (mem_cgroup_disabled())
877 return;
878
879 memcg = get_non_dying_memcg_start(memcg, &rcu_locked);
880 __mod_memcg_state(memcg, idx, val);
881 get_non_dying_memcg_end(rcu_locked);
882}
883
884#ifdef CONFIG_MEMCG_V1
885/* idx can be of type enum memcg_stat_item or node_stat_item. */
886unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
887{
888 long x;
889 int i = memcg_stats_index(idx);
890
891 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
892 return 0;
893
894 x = READ_ONCE(memcg->vmstats->state_local[i]);
895#ifdef CONFIG_SMP
896 if (x < 0)
897 x = 0;
898#endif
899 return x;
900}
901
902void reparent_memcg_state_local(struct mem_cgroup *memcg,
903 struct mem_cgroup *parent, int idx)
904{
905 unsigned long value = memcg_page_state_local(memcg, idx);
906
907 __mod_memcg_state(memcg, idx, -value);
908 __mod_memcg_state(parent, idx, value);
909}
910#endif
911
912static void __mod_memcg_lruvec_state(struct mem_cgroup_per_node *pn,
913 enum node_stat_item idx, long val)
914{
915 struct mem_cgroup *memcg = pn->memcg;
916 int i = memcg_stats_index(idx);
917 int cpu;
918
919 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
920 return;
921
922 cpu = get_cpu();
923
924 /* Update memcg */
925 this_cpu_add(memcg->vmstats_percpu->state[i], val);
926
927 /* Update lruvec */
928 this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
929
930 val = memcg_state_val_in_pages(idx, val);
931 memcg_rstat_updated(memcg, val, cpu);
932 trace_mod_memcg_lruvec_state(memcg, idx, val);
933
934 put_cpu();
935}
936
937static void mod_memcg_lruvec_state(struct lruvec *lruvec,
938 enum node_stat_item idx,
939 int val)
940{
941 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
942 struct mem_cgroup_per_node *pn;
943 struct mem_cgroup *memcg;
944 bool rcu_locked = false;
945
946 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
947 memcg = get_non_dying_memcg_start(pn->memcg, &rcu_locked);
948 pn = memcg->nodeinfo[pgdat->node_id];
949
950 __mod_memcg_lruvec_state(pn, idx, val);
951
952 get_non_dying_memcg_end(rcu_locked);
953}
954
/**
 * mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
965void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
966 int val)
967{
968 /* Update node */
969 mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
970
971 /* Update memcg and lruvec */
972 if (!mem_cgroup_disabled())
973 mod_memcg_lruvec_state(lruvec, idx, val);
974}
975
976void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
977 int val)
978{
979 struct mem_cgroup *memcg;
980 pg_data_t *pgdat = folio_pgdat(folio);
981 struct lruvec *lruvec;
982
983 rcu_read_lock();
984 memcg = folio_memcg(folio);
985 /* Untracked pages have no memcg, no lruvec. Update only the node */
986 if (!memcg) {
987 rcu_read_unlock();
988 mod_node_page_state(pgdat, idx, val);
989 return;
990 }
991
992 lruvec = mem_cgroup_lruvec(memcg, pgdat);
993 mod_lruvec_state(lruvec, idx, val);
994 rcu_read_unlock();
995}
996EXPORT_SYMBOL(lruvec_stat_mod_folio);
997
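/*
 * Illustrative sketch (not part of the upstream file): a filesystem-style
 * caller marking a folio dirty would account it against the owning memcg,
 * lruvec and node in a single call.
 */
static void __maybe_unused example_account_dirty(struct folio *folio)
{
	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, folio_nr_pages(folio));
}
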
998void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
999{
1000 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
1001 struct mem_cgroup *memcg;
1002 struct lruvec *lruvec;
1003
1004 rcu_read_lock();
1005 memcg = mem_cgroup_from_virt(p);
1006
	/*
	 * Untracked pages have no memcg and no lruvec, so update only the
	 * node. If slab objects were reparented to the root memcg, we still
	 * need to update the per-memcg vmstats when freeing them, to keep
	 * the stats correct for the root memcg.
	 */
1013 if (!memcg) {
1014 mod_node_page_state(pgdat, idx, val);
1015 } else {
1016 lruvec = mem_cgroup_lruvec(memcg, pgdat);
1017 mod_lruvec_state(lruvec, idx, val);
1018 }
1019 rcu_read_unlock();
1020}
1021
1022/**
1023 * count_memcg_events - account VM events in a cgroup
1024 * @memcg: the memory cgroup
1025 * @idx: the event item
1026 * @count: the number of events that occurred
1027 */
1028void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
1029 unsigned long count)
1030{
1031 int i = memcg_events_index(idx);
1032 int cpu;
1033
1034 if (mem_cgroup_disabled())
1035 return;
1036
1037 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
1038 return;
1039
1040 cpu = get_cpu();
1041
1042 this_cpu_add(memcg->vmstats_percpu->events[i], count);
1043 memcg_rstat_updated(memcg, count, cpu);
1044 trace_count_memcg_events(memcg, idx, count);
1045
1046 put_cpu();
1047}
1048
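/*
 * Illustrative sketch (not part of the upstream file): a fault handler could
 * account a major fault against a memcg like this; real callers typically go
 * through helpers such as count_memcg_event_mm().
 */
static void __maybe_unused example_count_major_fault(struct mem_cgroup *memcg)
{
	count_memcg_events(memcg, PGMAJFAULT, 1);
}
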
1049unsigned long memcg_events(struct mem_cgroup *memcg, int event)
1050{
1051 int i = memcg_events_index(event);
1052
1053 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
1054 return 0;
1055
1056 return READ_ONCE(memcg->vmstats->events[i]);
1057}
1058
1059bool memcg_vm_event_item_valid(enum vm_event_item idx)
1060{
1061 if (idx >= NR_VM_EVENT_ITEMS)
1062 return false;
1063
1064 return !BAD_STAT_IDX(memcg_events_index(idx));
1065}
1066
1067#ifdef CONFIG_MEMCG_V1
1068unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
1069{
1070 int i = memcg_events_index(event);
1071
1072 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
1073 return 0;
1074
1075 return READ_ONCE(memcg->vmstats->events_local[i]);
1076}
1077#endif
1078
1079struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1080{
1081 /*
1082 * mm_update_next_owner() may clear mm->owner to NULL
1083 * if it races with swapoff, page migration, etc.
1084 * So this can be called with p == NULL.
1085 */
1086 if (unlikely(!p))
1087 return NULL;
1088
1089 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1090}
1091EXPORT_SYMBOL(mem_cgroup_from_task);
1092
1093static __always_inline struct mem_cgroup *active_memcg(void)
1094{
1095 if (!in_task())
1096 return this_cpu_read(int_active_memcg);
1097 else
1098 return current->active_memcg;
1099}
1100
/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. If mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available
 * 3) root memcg
 * If mem_cgroup is disabled, NULL is returned.
 */
1112struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1113{
1114 struct mem_cgroup *memcg;
1115
1116 if (mem_cgroup_disabled())
1117 return NULL;
1118
1119 /*
1120 * Page cache insertions can happen without an
1121 * actual mm context, e.g. during disk probing
1122 * on boot, loopback IO, acct() writes etc.
1123 *
1124 * No need to css_get on root memcg as the reference
1125 * counting is disabled on the root level in the
1126 * cgroup core. See CSS_NO_REF.
1127 */
1128 if (unlikely(!mm)) {
1129 memcg = active_memcg();
1130 if (unlikely(memcg)) {
1131 /* remote memcg must hold a ref */
1132 css_get(&memcg->css);
1133 return memcg;
1134 }
1135 mm = current->mm;
1136 if (unlikely(!mm))
1137 return root_mem_cgroup;
1138 }
1139
1140 rcu_read_lock();
1141 do {
1142 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1143 if (unlikely(!memcg))
1144 memcg = root_mem_cgroup;
1145 } while (!css_tryget(&memcg->css));
1146 rcu_read_unlock();
1147 return memcg;
1148}
1149EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1150
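/*
 * Illustrative sketch (not part of the upstream file): the reference returned
 * by get_mem_cgroup_from_mm() must be dropped once the caller is done with it.
 */
static void __maybe_unused example_charge_context(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);

	if (!memcg)
		return;		/* memcg is disabled */

	/* ... charge against or inspect the memcg here ... */

	css_put(&memcg->css);
}
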
1151/**
1152 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
1153 */
1154struct mem_cgroup *get_mem_cgroup_from_current(void)
1155{
1156 struct mem_cgroup *memcg;
1157
1158 if (mem_cgroup_disabled())
1159 return NULL;
1160
1161again:
1162 rcu_read_lock();
1163 memcg = mem_cgroup_from_task(current);
1164 if (!css_tryget(&memcg->css)) {
1165 rcu_read_unlock();
1166 goto again;
1167 }
1168 rcu_read_unlock();
1169 return memcg;
1170}
1171
1172/**
1173 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
1174 * @folio: folio from which memcg should be extracted.
1175 *
1176 * See folio_memcg() for folio->objcg/memcg binding rules.
1177 */
1178struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
1179{
1180 struct mem_cgroup *memcg;
1181
1182 if (mem_cgroup_disabled())
1183 return NULL;
1184
1185 if (!folio_memcg_charged(folio))
1186 return root_mem_cgroup;
1187
1188 rcu_read_lock();
1189 do {
1190 memcg = folio_memcg(folio);
1191 } while (unlikely(!css_tryget(&memcg->css)));
1192 rcu_read_unlock();
1193 return memcg;
1194}
1195
1196/**
1197 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1198 * @root: hierarchy root
1199 * @prev: previously returned memcg, NULL on first invocation
1200 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1201 *
1202 * Returns references to children of the hierarchy below @root, or
1203 * @root itself, or %NULL after a full round-trip.
1204 *
1205 * Caller must pass the return value in @prev on subsequent
1206 * invocations for reference counting, or use mem_cgroup_iter_break()
1207 * to cancel a hierarchy walk before the round-trip is complete.
1208 *
1209 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1210 * in the hierarchy among all concurrent reclaimers operating on the
1211 * same node.
1212 */
1213struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1214 struct mem_cgroup *prev,
1215 struct mem_cgroup_reclaim_cookie *reclaim)
1216{
1217 struct mem_cgroup_reclaim_iter *iter;
1218 struct cgroup_subsys_state *css;
1219 struct mem_cgroup *pos;
1220 struct mem_cgroup *next;
1221
1222 if (mem_cgroup_disabled())
1223 return NULL;
1224
1225 if (!root)
1226 root = root_mem_cgroup;
1227
1228 rcu_read_lock();
1229restart:
1230 next = NULL;
1231
1232 if (reclaim) {
1233 int gen;
1234 int nid = reclaim->pgdat->node_id;
1235
1236 iter = &root->nodeinfo[nid]->iter;
1237 gen = atomic_read(&iter->generation);
1238
1239 /*
1240 * On start, join the current reclaim iteration cycle.
1241 * Exit when a concurrent walker completes it.
1242 */
1243 if (!prev)
1244 reclaim->generation = gen;
1245 else if (reclaim->generation != gen)
1246 goto out_unlock;
1247
1248 pos = READ_ONCE(iter->position);
1249 } else
1250 pos = prev;
1251
1252 css = pos ? &pos->css : NULL;
1253
1254 while ((css = css_next_descendant_pre(css, &root->css))) {
1255 /*
1256 * Verify the css and acquire a reference. The root
1257 * is provided by the caller, so we know it's alive
1258 * and kicking, and don't take an extra reference.
1259 */
1260 if (css == &root->css || css_tryget(css))
1261 break;
1262 }
1263
1264 next = mem_cgroup_from_css(css);
1265
1266 if (reclaim) {
1267 /*
1268 * The position could have already been updated by a competing
1269 * thread, so check that the value hasn't changed since we read
1270 * it to avoid reclaiming from the same cgroup twice.
1271 */
1272 if (cmpxchg(&iter->position, pos, next) != pos) {
1273 if (css && css != &root->css)
1274 css_put(css);
1275 goto restart;
1276 }
1277
1278 if (!next) {
1279 atomic_inc(&iter->generation);
1280
1281 /*
1282 * Reclaimers share the hierarchy walk, and a
1283 * new one might jump in right at the end of
1284 * the hierarchy - make sure they see at least
1285 * one group and restart from the beginning.
1286 */
1287 if (!prev)
1288 goto restart;
1289 }
1290 }
1291
1292out_unlock:
1293 rcu_read_unlock();
1294 if (prev && prev != root)
1295 css_put(&prev->css);
1296
1297 return next;
1298}
1299
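/*
 * Illustrative sketch (not part of the upstream file): a full pre-order walk
 * of a subtree using the iterator protocol described above. The
 * for_each_mem_cgroup_tree() helper used further down wraps the same pattern.
 */
static void __maybe_unused example_walk_subtree(struct mem_cgroup *root)
{
	struct mem_cgroup *iter;

	for (iter = mem_cgroup_iter(root, NULL, NULL);
	     iter;
	     iter = mem_cgroup_iter(root, iter, NULL)) {
		/* ... inspect iter; no early break, so no iter_break needed ... */
	}
}
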
1300/**
1301 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1302 * @root: hierarchy root
1303 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1304 */
1305void mem_cgroup_iter_break(struct mem_cgroup *root,
1306 struct mem_cgroup *prev)
1307{
1308 if (!root)
1309 root = root_mem_cgroup;
1310 if (prev && prev != root)
1311 css_put(&prev->css);
1312}
1313
1314static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1315 struct mem_cgroup *dead_memcg)
1316{
1317 struct mem_cgroup_reclaim_iter *iter;
1318 struct mem_cgroup_per_node *mz;
1319 int nid;
1320
1321 for_each_node(nid) {
1322 mz = from->nodeinfo[nid];
1323 iter = &mz->iter;
1324 cmpxchg(&iter->position, dead_memcg, NULL);
1325 }
1326}
1327
1328static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1329{
1330 struct mem_cgroup *memcg = dead_memcg;
1331 struct mem_cgroup *last;
1332
1333 do {
1334 __invalidate_reclaim_iterators(memcg, dead_memcg);
1335 last = memcg;
1336 } while ((memcg = parent_mem_cgroup(memcg)));
1337
1338 /*
1339 * When cgroup1 non-hierarchy mode is used,
1340 * parent_mem_cgroup() does not walk all the way up to the
1341 * cgroup root (root_mem_cgroup). So we have to handle
1342 * dead_memcg from cgroup root separately.
1343 */
1344 if (!mem_cgroup_is_root(last))
1345 __invalidate_reclaim_iterators(root_mem_cgroup,
1346 dead_memcg);
1347}
1348
1349/**
1350 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1351 * @memcg: hierarchy root
1352 * @fn: function to call for each task
1353 * @arg: argument passed to @fn
1354 *
1355 * This function iterates over tasks attached to @memcg or to any of its
1356 * descendants and calls @fn for each task. If @fn returns a non-zero
1357 * value, the function breaks the iteration loop. Otherwise, it will iterate
1358 * over all tasks and return 0.
1359 *
1360 * This function must not be called for the root memory cgroup.
1361 */
1362void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1363 int (*fn)(struct task_struct *, void *), void *arg)
1364{
1365 struct mem_cgroup *iter;
1366 int ret = 0;
1367
1368 BUG_ON(mem_cgroup_is_root(memcg));
1369
1370 for_each_mem_cgroup_tree(iter, memcg) {
1371 struct css_task_iter it;
1372 struct task_struct *task;
1373
1374 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1375 while (!ret && (task = css_task_iter_next(&it))) {
1376 ret = fn(task, arg);
1377 /* Avoid potential softlockup warning */
1378 cond_resched();
1379 }
1380 css_task_iter_end(&it);
1381 if (ret) {
1382 mem_cgroup_iter_break(memcg, iter);
1383 break;
1384 }
1385 }
1386}
1387
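/*
 * Illustrative sketch (not part of the upstream file): a caller-provided
 * callback for mem_cgroup_scan_tasks(). Returning non-zero stops the walk;
 * the OOM killer uses this interface to evaluate every task in an OOMing
 * subtree.
 */
static int __maybe_unused example_count_task(struct task_struct *task, void *arg)
{
	unsigned int *nr_tasks = arg;

	(*nr_tasks)++;
	return 0;	/* keep iterating */
}
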
1388/**
1389 * folio_lruvec_lock - Lock the lruvec for a folio.
1390 * @folio: Pointer to the folio.
1391 *
1392 * These functions are safe to use under any of the following conditions:
1393 * - folio locked
1394 * - folio_test_lru false
1395 * - folio frozen (refcount of 0)
1396 *
1397 * Return: The lruvec this folio is on with its lock held and rcu read lock held.
1398 */
1399struct lruvec *folio_lruvec_lock(struct folio *folio)
1400{
1401 struct lruvec *lruvec;
1402
1403 rcu_read_lock();
1404retry:
1405 lruvec = folio_lruvec(folio);
1406 spin_lock(&lruvec->lru_lock);
1407 if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
1408 spin_unlock(&lruvec->lru_lock);
1409 goto retry;
1410 }
1411
1412 return lruvec;
1413}
1414
1415/**
1416 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1417 * @folio: Pointer to the folio.
1418 *
1419 * These functions are safe to use under any of the following conditions:
1420 * - folio locked
1421 * - folio_test_lru false
1422 * - folio frozen (refcount of 0)
1423 *
1424 * Return: The lruvec this folio is on with its lock held and interrupts
1425 * disabled and rcu read lock held.
1426 */
1427struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1428{
1429 struct lruvec *lruvec;
1430
1431 rcu_read_lock();
1432retry:
1433 lruvec = folio_lruvec(folio);
1434 spin_lock_irq(&lruvec->lru_lock);
1435 if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
1436 spin_unlock_irq(&lruvec->lru_lock);
1437 goto retry;
1438 }
1439
1440 return lruvec;
1441}
1442
1443/**
1444 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1445 * @folio: Pointer to the folio.
1446 * @flags: Pointer to irqsave flags.
1447 *
1448 * These functions are safe to use under any of the following conditions:
1449 * - folio locked
1450 * - folio_test_lru false
1451 * - folio frozen (refcount of 0)
1452 *
1453 * Return: The lruvec this folio is on with its lock held and interrupts
1454 * disabled and rcu read lock held.
1455 */
1456struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1457 unsigned long *flags)
1458{
1459 struct lruvec *lruvec;
1460
1461 rcu_read_lock();
1462retry:
1463 lruvec = folio_lruvec(folio);
1464 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1465 if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
1466 spin_unlock_irqrestore(&lruvec->lru_lock, *flags);
1467 goto retry;
1468 }
1469
1470 return lruvec;
1471}
1472
1473/**
1474 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1475 * @lruvec: mem_cgroup per zone lru vector
1476 * @lru: index of lru list the page is sitting on
1477 * @zid: zone id of the accounted pages
1478 * @nr_pages: positive when adding or negative when removing
1479 *
1480 * This function must be called under lru_lock, just before a page is added
1481 * to or just after a page is removed from an lru list.
1482 */
1483void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1484 int zid, long nr_pages)
1485{
1486 struct mem_cgroup_per_node *mz;
1487 unsigned long *lru_size;
1488 long size;
1489
1490 if (mem_cgroup_disabled())
1491 return;
1492
1493 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1494 lru_size = &mz->lru_zone_size[zid][lru];
1495
1496 if (nr_pages < 0)
1497 *lru_size += nr_pages;
1498
1499 size = *lru_size;
1500 if (WARN_ONCE(size < 0,
1501 "%s(%p, %d, %ld): lru_size %ld\n",
1502 __func__, lruvec, lru, nr_pages, size)) {
1503 VM_BUG_ON(1);
1504 *lru_size = 0;
1505 }
1506
1507 if (nr_pages > 0)
1508 *lru_size += nr_pages;
1509}
1510
/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
1518static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1519{
1520 unsigned long margin = 0;
1521 unsigned long count;
1522 unsigned long limit;
1523
1524 count = page_counter_read(&memcg->memory);
1525 limit = READ_ONCE(memcg->memory.max);
1526 if (count < limit)
1527 margin = limit - count;
1528
1529 if (do_memsw_account()) {
1530 count = page_counter_read(&memcg->memsw);
1531 limit = READ_ONCE(memcg->memsw.max);
1532 if (count < limit)
1533 margin = min(margin, limit - count);
1534 else
1535 margin = 0;
1536 }
1537
1538 return margin;
1539}
1540
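/*
 * Worked example (illustrative): with memory.max at 1000 pages and usage at
 * 900, the memory margin is 100 pages. If memsw accounting is enabled with a
 * memsw limit of 1200 pages and memsw usage of 1150, the margin shrinks to
 * min(100, 50) = 50 pages; only a charge of up to that size fits without
 * exceeding either limit.
 */
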
1541struct memory_stat {
1542 const char *name;
1543 unsigned int idx;
1544};
1545
1546static const struct memory_stat memory_stats[] = {
1547 { "anon", NR_ANON_MAPPED },
1548 { "file", NR_FILE_PAGES },
1549 { "kernel", MEMCG_KMEM },
1550 { "kernel_stack", NR_KERNEL_STACK_KB },
1551 { "pagetables", NR_PAGETABLE },
1552 { "sec_pagetables", NR_SECONDARY_PAGETABLE },
1553 { "percpu", MEMCG_PERCPU_B },
1554 { "sock", MEMCG_SOCK },
1555 { "vmalloc", NR_VMALLOC },
1556 { "shmem", NR_SHMEM },
1557#ifdef CONFIG_ZSWAP
1558 { "zswap", MEMCG_ZSWAP_B },
1559 { "zswapped", MEMCG_ZSWAPPED },
1560 { "zswap_incomp", MEMCG_ZSWAP_INCOMP },
1561#endif
1562 { "file_mapped", NR_FILE_MAPPED },
1563 { "file_dirty", NR_FILE_DIRTY },
1564 { "file_writeback", NR_WRITEBACK },
1565#ifdef CONFIG_SWAP
1566 { "swapcached", NR_SWAPCACHE },
1567#endif
1568#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1569 { "anon_thp", NR_ANON_THPS },
1570 { "file_thp", NR_FILE_THPS },
1571 { "shmem_thp", NR_SHMEM_THPS },
1572#endif
1573 { "inactive_anon", NR_INACTIVE_ANON },
1574 { "active_anon", NR_ACTIVE_ANON },
1575 { "inactive_file", NR_INACTIVE_FILE },
1576 { "active_file", NR_ACTIVE_FILE },
1577 { "unevictable", NR_UNEVICTABLE },
1578 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
1579 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
1580#ifdef CONFIG_HUGETLB_PAGE
1581 { "hugetlb", NR_HUGETLB },
1582#endif
1583
1584 /* The memory events */
1585 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
1586 { "workingset_refault_file", WORKINGSET_REFAULT_FILE },
1587 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
1588 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
1589 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
1590 { "workingset_restore_file", WORKINGSET_RESTORE_FILE },
1591 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
1592
1593 { "pgdemote_kswapd", PGDEMOTE_KSWAPD },
1594 { "pgdemote_direct", PGDEMOTE_DIRECT },
1595 { "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED },
1596 { "pgdemote_proactive", PGDEMOTE_PROACTIVE },
1597 { "pgsteal_kswapd", PGSTEAL_KSWAPD },
1598 { "pgsteal_direct", PGSTEAL_DIRECT },
1599 { "pgsteal_khugepaged", PGSTEAL_KHUGEPAGED },
1600 { "pgsteal_proactive", PGSTEAL_PROACTIVE },
1601 { "pgscan_kswapd", PGSCAN_KSWAPD },
1602 { "pgscan_direct", PGSCAN_DIRECT },
1603 { "pgscan_khugepaged", PGSCAN_KHUGEPAGED },
1604 { "pgscan_proactive", PGSCAN_PROACTIVE },
1605 { "pgrefill", PGREFILL },
1606#ifdef CONFIG_NUMA_BALANCING
1607 { "pgpromote_success", PGPROMOTE_SUCCESS },
1608#endif
1609};
1610
1611/* The actual unit of the state item, not the same as the output unit */
1612static int memcg_page_state_unit(int item)
1613{
1614 switch (item) {
1615 case MEMCG_PERCPU_B:
1616 case MEMCG_ZSWAP_B:
1617 case NR_SLAB_RECLAIMABLE_B:
1618 case NR_SLAB_UNRECLAIMABLE_B:
1619 return 1;
1620 case NR_KERNEL_STACK_KB:
1621 return SZ_1K;
1622 default:
1623 return PAGE_SIZE;
1624 }
1625}
1626
1627/* Translate stat items to the correct unit for memory.stat output */
1628static int memcg_page_state_output_unit(int item)
1629{
1630 /*
1631 * Workingset state is actually in pages, but we export it to userspace
1632 * as a scalar count of events, so special case it here.
1633 *
1634 * Demotion and promotion activities are exported in pages, consistent
1635 * with their global counterparts.
1636 */
1637 switch (item) {
1638 case WORKINGSET_REFAULT_ANON:
1639 case WORKINGSET_REFAULT_FILE:
1640 case WORKINGSET_ACTIVATE_ANON:
1641 case WORKINGSET_ACTIVATE_FILE:
1642 case WORKINGSET_RESTORE_ANON:
1643 case WORKINGSET_RESTORE_FILE:
1644 case WORKINGSET_NODERECLAIM:
1645 case PGDEMOTE_KSWAPD:
1646 case PGDEMOTE_DIRECT:
1647 case PGDEMOTE_KHUGEPAGED:
1648 case PGDEMOTE_PROACTIVE:
1649 case PGSTEAL_KSWAPD:
1650 case PGSTEAL_DIRECT:
1651 case PGSTEAL_KHUGEPAGED:
1652 case PGSTEAL_PROACTIVE:
1653 case PGSCAN_KSWAPD:
1654 case PGSCAN_DIRECT:
1655 case PGSCAN_KHUGEPAGED:
1656 case PGSCAN_PROACTIVE:
1657 case PGREFILL:
1658#ifdef CONFIG_NUMA_BALANCING
1659 case PGPROMOTE_SUCCESS:
1660#endif
1661 return 1;
1662 default:
1663 return memcg_page_state_unit(item);
1664 }
1665}
1666
1667unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
1668{
1669 return memcg_page_state(memcg, item) *
1670 memcg_page_state_output_unit(item);
1671}
1672
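/*
 * Worked example (illustrative): for memory.stat output, a byte-based item
 * such as NR_SLAB_RECLAIMABLE_B (unit 1) is emitted as-is, a page-based item
 * like NR_SHMEM is multiplied by PAGE_SIZE to yield bytes, and event-like
 * items such as PGSCAN_KSWAPD use unit 1 so they read as plain event counts.
 */
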
1673#ifdef CONFIG_MEMCG_V1
1674unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
1675{
1676 return memcg_page_state_local(memcg, item) *
1677 memcg_page_state_output_unit(item);
1678}
1679#endif
1680
1681#ifdef CONFIG_HUGETLB_PAGE
1682static bool memcg_accounts_hugetlb(void)
1683{
1684 return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
1685}
1686#else /* CONFIG_HUGETLB_PAGE */
1687static bool memcg_accounts_hugetlb(void)
1688{
1689 return false;
1690}
1691#endif /* CONFIG_HUGETLB_PAGE */
1692
1693static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1694{
1695 int i;
1696
1697 /*
1698 * Provide statistics on the state of the memory subsystem as
1699 * well as cumulative event counters that show past behavior.
1700 *
1701 * This list is ordered following a combination of these gradients:
1702 * 1) generic big picture -> specifics and details
1703 * 2) reflecting userspace activity -> reflecting kernel heuristics
1704 *
1705 * Current memory state:
1706 */
1707 mem_cgroup_flush_stats(memcg);
1708
1709 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1710 u64 size;
1711
1712#ifdef CONFIG_HUGETLB_PAGE
1713 if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
1714 !memcg_accounts_hugetlb())
1715 continue;
1716#endif
1717 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1718 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1719
1720 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1721 size += memcg_page_state_output(memcg,
1722 NR_SLAB_RECLAIMABLE_B);
1723 seq_buf_printf(s, "slab %llu\n", size);
1724 }
1725 }
1726
1727 /* Accumulated memory events */
1728 seq_buf_printf(s, "pgscan %lu\n",
1729 memcg_page_state(memcg, PGSCAN_KSWAPD) +
1730 memcg_page_state(memcg, PGSCAN_DIRECT) +
1731 memcg_page_state(memcg, PGSCAN_PROACTIVE) +
1732 memcg_page_state(memcg, PGSCAN_KHUGEPAGED));
1733 seq_buf_printf(s, "pgsteal %lu\n",
1734 memcg_page_state(memcg, PGSTEAL_KSWAPD) +
1735 memcg_page_state(memcg, PGSTEAL_DIRECT) +
1736 memcg_page_state(memcg, PGSTEAL_PROACTIVE) +
1737 memcg_page_state(memcg, PGSTEAL_KHUGEPAGED));
1738
1739 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1740#ifdef CONFIG_MEMCG_V1
1741 if (memcg_vm_event_stat[i] == PGPGIN ||
1742 memcg_vm_event_stat[i] == PGPGOUT)
1743 continue;
1744#endif
1745 seq_buf_printf(s, "%s %lu\n",
1746 vm_event_name(memcg_vm_event_stat[i]),
1747 memcg_events(memcg, memcg_vm_event_stat[i]));
1748 }
1749}
1750
1751static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1752{
1753 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1754 memcg_stat_format(memcg, s);
1755 else
1756 memcg1_stat_format(memcg, s);
1757 if (seq_buf_has_overflowed(s))
1758 pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
1759}
1760
1761/**
1762 * mem_cgroup_print_oom_context: Print OOM information relevant to
1763 * memory controller.
1764 * @memcg: The memory cgroup that went over limit
1765 * @p: Task that is going to be killed
1766 *
1767 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1768 * enabled
1769 */
1770void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1771{
1772 rcu_read_lock();
1773
1774 if (memcg) {
1775 pr_cont(",oom_memcg=");
1776 pr_cont_cgroup_path(memcg->css.cgroup);
1777 } else
1778 pr_cont(",global_oom");
1779 if (p) {
1780 pr_cont(",task_memcg=");
1781 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1782 }
1783 rcu_read_unlock();
1784}
1785
1786/**
1787 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1788 * memory controller.
1789 * @memcg: The memory cgroup that went over limit
1790 */
1791void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1792{
	/* Use a static buffer, as the caller is holding oom_lock. */
1794 static char buf[SEQ_BUF_SIZE];
1795 struct seq_buf s;
1796 unsigned long memory_failcnt;
1797
1798 lockdep_assert_held(&oom_lock);
1799
1800 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1801 memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
1802 else
1803 memory_failcnt = memcg->memory.failcnt;
1804
1805 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1806 K((u64)page_counter_read(&memcg->memory)),
1807 K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
1808 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1809 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1810 K((u64)page_counter_read(&memcg->swap)),
1811 K((u64)READ_ONCE(memcg->swap.max)),
1812 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
1813#ifdef CONFIG_MEMCG_V1
1814 else {
1815 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1816 K((u64)page_counter_read(&memcg->memsw)),
1817 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1818 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1819 K((u64)page_counter_read(&memcg->kmem)),
1820 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1821 }
1822#endif
1823
1824 pr_info("Memory cgroup stats for ");
1825 pr_cont_cgroup_path(memcg->css.cgroup);
1826 pr_cont(":");
1827 seq_buf_init(&s, buf, SEQ_BUF_SIZE);
1828 memory_stat_format(memcg, &s);
1829 seq_buf_do_printk(&s, KERN_INFO);
1830}
1831
1832/*
1833 * Return the memory (and swap, if configured) limit for a memcg.
1834 */
1835unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1836{
1837 unsigned long max = READ_ONCE(memcg->memory.max);
1838
1839 if (do_memsw_account()) {
1840 if (mem_cgroup_swappiness(memcg)) {
1841 /* Calculate swap excess capacity from memsw limit */
1842 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1843
1844 max += min(swap, (unsigned long)total_swap_pages);
1845 }
1846 } else {
1847 if (mem_cgroup_swappiness(memcg))
1848 max += min(READ_ONCE(memcg->swap.max),
1849 (unsigned long)total_swap_pages);
1850 }
1851 return max;
1852}
1853
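/*
 * Worked example (illustrative): on cgroup v2 with memory.max at 262144 pages
 * (1G with 4kB pages), swap.max at 131072 pages and non-zero swappiness, the
 * value returned here is 262144 + min(131072, total_swap_pages) pages, i.e.
 * up to 1.5G worth of pages if enough swap is configured.
 */
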
1854void __memcg_memory_event(struct mem_cgroup *memcg,
1855 enum memcg_memory_event event, bool allow_spinning)
1856{
1857 bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
1858 event == MEMCG_SWAP_FAIL;
1859
1860 /* For now only MEMCG_MAX can happen with !allow_spinning context. */
1861 VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);
1862
1863 atomic_long_inc(&memcg->memory_events_local[event]);
1864 if (!swap_event && allow_spinning)
1865 cgroup_file_notify(&memcg->events_local_file);
1866
1867 do {
1868 atomic_long_inc(&memcg->memory_events[event]);
1869 if (allow_spinning) {
1870 if (swap_event)
1871 cgroup_file_notify(&memcg->swap_events_file);
1872 else
1873 cgroup_file_notify(&memcg->events_file);
1874 }
1875
1876 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1877 break;
1878 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1879 break;
1880 } while ((memcg = parent_mem_cgroup(memcg)) &&
1881 !mem_cgroup_is_root(memcg));
1882}
1883EXPORT_SYMBOL_GPL(__memcg_memory_event);
1884
1885static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1886 int order)
1887{
1888 struct oom_control oc = {
1889 .zonelist = NULL,
1890 .nodemask = NULL,
1891 .memcg = memcg,
1892 .gfp_mask = gfp_mask,
1893 .order = order,
1894 };
1895 bool ret = true;
1896
1897 if (mutex_lock_killable(&oom_lock))
1898 return true;
1899
1900 if (mem_cgroup_margin(memcg) >= (1 << order))
1901 goto unlock;
1902
1903 /*
1904 * A few threads which were not waiting at mutex_lock_killable() can
1905 * fail to bail out. Therefore, check again after holding oom_lock.
1906 */
1907 ret = out_of_memory(&oc);
1908
1909unlock:
1910 mutex_unlock(&oom_lock);
1911 return ret;
1912}
1913
1914/*
1915 * Returns true if successfully killed one or more processes. Though in some
1916 * corner cases it can return true even without killing any process.
1917 */
1918static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1919{
1920 bool locked, ret;
1921
1922 if (order > PAGE_ALLOC_COSTLY_ORDER)
1923 return false;
1924
1925 memcg_memory_event(memcg, MEMCG_OOM);
1926
1927 if (!memcg1_oom_prepare(memcg, &locked))
1928 return false;
1929
1930 ret = mem_cgroup_out_of_memory(memcg, mask, order);
1931
1932 memcg1_oom_finish(memcg, locked);
1933
1934 return ret;
1935}
1936
1937/**
1938 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1939 * @victim: task to be killed by the OOM killer
1940 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1941 *
1942 * Returns a pointer to a memory cgroup, which has to be cleaned up
1943 * by killing all belonging OOM-killable tasks.
1944 *
1945 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1946 */
1947struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1948 struct mem_cgroup *oom_domain)
1949{
1950 struct mem_cgroup *oom_group = NULL;
1951 struct mem_cgroup *memcg;
1952
1953 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1954 return NULL;
1955
1956 if (!oom_domain)
1957 oom_domain = root_mem_cgroup;
1958
1959 rcu_read_lock();
1960
1961 memcg = mem_cgroup_from_task(victim);
1962 if (mem_cgroup_is_root(memcg))
1963 goto out;
1964
1965 /*
1966 * If the victim task has been asynchronously moved to a different
1967 * memory cgroup, we might end up killing tasks outside oom_domain.
1968 * In this case it's better to ignore memory.group.oom.
1969 */
1970 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1971 goto out;
1972
1973 /*
1974 * Traverse the memory cgroup hierarchy from the victim task's
1975 * cgroup up to the OOMing cgroup (or root) to find the
1976 * highest-level memory cgroup with oom.group set.
1977 */
1978 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1979 if (READ_ONCE(memcg->oom_group))
1980 oom_group = memcg;
1981
1982 if (memcg == oom_domain)
1983 break;
1984 }
1985
1986 if (oom_group)
1987 css_get(&oom_group->css);
1988out:
1989 rcu_read_unlock();
1990
1991 return oom_group;
1992}
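
/*
 * Illustrative caller sketch (hypothetical; the actual consumer is the OOM
 * killer outside this file). The returned group, if any, must be released
 * with mem_cgroup_put() as noted in the comment above:
 *
 *	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
 *	if (oom_group) {
 *		mem_cgroup_print_oom_group(oom_group);
 *		... kill every OOM-killable task in oom_group ...
 *		mem_cgroup_put(oom_group);
 *	}
 */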
1993
1994void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1995{
1996 pr_info("Tasks in ");
1997 pr_cont_cgroup_path(memcg->css.cgroup);
1998 pr_cont(" are going to be killed due to memory.oom.group set\n");
1999}
2000
2001/*
2002 * The value of NR_MEMCG_STOCK is selected to keep the cached memcgs and their
2003 * nr_pages in a single cacheline. This may change in the future.
2004 */
2005#define NR_MEMCG_STOCK 7
2006#define FLUSHING_CACHED_CHARGE 0
2007struct memcg_stock_pcp {
2008 local_trylock_t lock;
2009 uint8_t nr_pages[NR_MEMCG_STOCK];
2010 struct mem_cgroup *cached[NR_MEMCG_STOCK];
2011
2012 struct work_struct work;
2013 unsigned long flags;
2014};
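
/*
 * Sizing sketch for the comment above (assuming 64-bit pointers and 64-byte
 * cachelines): the two arrays hit on every stock access take
 * 7 * sizeof(struct mem_cgroup *) + 7 * sizeof(uint8_t) = 56 + 7 = 63 bytes,
 * which fits in a single cacheline.
 */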
2015
2016static DEFINE_PER_CPU_ALIGNED(struct memcg_stock_pcp, memcg_stock) = {
2017 .lock = INIT_LOCAL_TRYLOCK(lock),
2018};
2019
2020struct obj_stock_pcp {
2021 local_trylock_t lock;
2022 unsigned int nr_bytes;
2023 struct obj_cgroup *cached_objcg;
2024 struct pglist_data *cached_pgdat;
2025 int nr_slab_reclaimable_b;
2026 int nr_slab_unreclaimable_b;
2027
2028 struct work_struct work;
2029 unsigned long flags;
2030};
2031
2032static DEFINE_PER_CPU_ALIGNED(struct obj_stock_pcp, obj_stock) = {
2033 .lock = INIT_LOCAL_TRYLOCK(lock),
2034};
2035
2036static DEFINE_MUTEX(percpu_charge_mutex);
2037
2038static void drain_obj_stock(struct obj_stock_pcp *stock);
2039static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
2040 struct mem_cgroup *root_memcg);
2041
2042/**
2043 * consume_stock: Try to consume stocked charge on this cpu.
2044 * @memcg: memcg to consume from.
2045 * @nr_pages: how many pages to charge.
2046 *
2047 * Consume the cached charge if enough nr_pages are present, otherwise return
2048 * failure. Also return failure for a charge request larger than
2049 * MEMCG_CHARGE_BATCH or if the local lock is already taken.
2050 *
2051 * Returns true if successful, false otherwise.
2052 */
2053static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2054{
2055 struct memcg_stock_pcp *stock;
2056 uint8_t stock_pages;
2057 bool ret = false;
2058 int i;
2059
2060 if (nr_pages > MEMCG_CHARGE_BATCH ||
2061 !local_trylock(&memcg_stock.lock))
2062 return ret;
2063
2064 stock = this_cpu_ptr(&memcg_stock);
2065
2066 for (i = 0; i < NR_MEMCG_STOCK; ++i) {
2067 if (memcg != READ_ONCE(stock->cached[i]))
2068 continue;
2069
2070 stock_pages = READ_ONCE(stock->nr_pages[i]);
2071 if (stock_pages >= nr_pages) {
2072 WRITE_ONCE(stock->nr_pages[i], stock_pages - nr_pages);
2073 ret = true;
2074 }
2075 break;
2076 }
2077
2078 local_unlock(&memcg_stock.lock);
2079
2080 return ret;
2081}
2082
2083static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
2084{
2085 page_counter_uncharge(&memcg->memory, nr_pages);
2086 if (do_memsw_account())
2087 page_counter_uncharge(&memcg->memsw, nr_pages);
2088}
2089
2090/*
2091 * Return the cached stock to the page counters and reset the cached information.
2092 */
2093static void drain_stock(struct memcg_stock_pcp *stock, int i)
2094{
2095 struct mem_cgroup *old = READ_ONCE(stock->cached[i]);
2096 uint8_t stock_pages;
2097
2098 if (!old)
2099 return;
2100
2101 stock_pages = READ_ONCE(stock->nr_pages[i]);
2102 if (stock_pages) {
2103 memcg_uncharge(old, stock_pages);
2104 WRITE_ONCE(stock->nr_pages[i], 0);
2105 }
2106
2107 css_put(&old->css);
2108 WRITE_ONCE(stock->cached[i], NULL);
2109}
2110
2111static void drain_stock_fully(struct memcg_stock_pcp *stock)
2112{
2113 int i;
2114
2115 for (i = 0; i < NR_MEMCG_STOCK; ++i)
2116 drain_stock(stock, i);
2117}
2118
2119static void drain_local_memcg_stock(struct work_struct *dummy)
2120{
2121 struct memcg_stock_pcp *stock;
2122
2123 if (WARN_ONCE(!in_task(), "drain in non-task context"))
2124 return;
2125
2126 local_lock(&memcg_stock.lock);
2127
2128 stock = this_cpu_ptr(&memcg_stock);
2129 drain_stock_fully(stock);
2130 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2131
2132 local_unlock(&memcg_stock.lock);
2133}
2134
2135static void drain_local_obj_stock(struct work_struct *dummy)
2136{
2137 struct obj_stock_pcp *stock;
2138
2139 if (WARN_ONCE(!in_task(), "drain in non-task context"))
2140 return;
2141
2142 local_lock(&obj_stock.lock);
2143
2144 stock = this_cpu_ptr(&obj_stock);
2145 drain_obj_stock(stock);
2146 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2147
2148 local_unlock(&obj_stock.lock);
2149}
2150
2151static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2152{
2153 struct memcg_stock_pcp *stock;
2154 struct mem_cgroup *cached;
2155 uint8_t stock_pages;
2156 bool success = false;
2157 int empty_slot = -1;
2158 int i;
2159
2160 /*
2161	 * For now limit MEMCG_CHARGE_BATCH to 127 or less. If we decide to
2162	 * increase it beyond 127 in the future, we will need more careful
2163 * handling of nr_pages[] in struct memcg_stock_pcp.
2164 */
2165 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S8_MAX);
2166
2167 VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
2168
2169 if (nr_pages > MEMCG_CHARGE_BATCH ||
2170 !local_trylock(&memcg_stock.lock)) {
2171 /*
2172 * In case of larger than batch refill or unlikely failure to
2173 * lock the percpu memcg_stock.lock, uncharge memcg directly.
2174 */
2175 memcg_uncharge(memcg, nr_pages);
2176 return;
2177 }
2178
2179 stock = this_cpu_ptr(&memcg_stock);
2180 for (i = 0; i < NR_MEMCG_STOCK; ++i) {
2181 cached = READ_ONCE(stock->cached[i]);
2182 if (!cached && empty_slot == -1)
2183 empty_slot = i;
2184 if (memcg == READ_ONCE(stock->cached[i])) {
2185 stock_pages = READ_ONCE(stock->nr_pages[i]) + nr_pages;
2186 WRITE_ONCE(stock->nr_pages[i], stock_pages);
2187 if (stock_pages > MEMCG_CHARGE_BATCH)
2188 drain_stock(stock, i);
2189 success = true;
2190 break;
2191 }
2192 }
2193
2194 if (!success) {
2195 i = empty_slot;
2196 if (i == -1) {
2197 i = get_random_u32_below(NR_MEMCG_STOCK);
2198 drain_stock(stock, i);
2199 }
2200 css_get(&memcg->css);
2201 WRITE_ONCE(stock->cached[i], memcg);
2202 WRITE_ONCE(stock->nr_pages[i], nr_pages);
2203 }
2204
2205 local_unlock(&memcg_stock.lock);
2206}
2207
2208static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,
2209 struct mem_cgroup *root_memcg)
2210{
2211 struct mem_cgroup *memcg;
2212 bool flush = false;
2213 int i;
2214
2215 rcu_read_lock();
2216 for (i = 0; i < NR_MEMCG_STOCK; ++i) {
2217 memcg = READ_ONCE(stock->cached[i]);
2218 if (!memcg)
2219 continue;
2220
2221 if (READ_ONCE(stock->nr_pages[i]) &&
2222 mem_cgroup_is_descendant(memcg, root_memcg)) {
2223 flush = true;
2224 break;
2225 }
2226 }
2227 rcu_read_unlock();
2228 return flush;
2229}
2230
2231static void schedule_drain_work(int cpu, struct work_struct *work)
2232{
2233 /*
2234 * Protect housekeeping cpumask read and work enqueue together
2235 * in the same RCU critical section so that later cpuset isolated
2236 * partition update only need to wait for an RCU GP and flush the
2237 * pending work on newly isolated CPUs.
2238 */
2239 guard(rcu)();
2240 if (!cpu_is_isolated(cpu))
2241 queue_work_on(cpu, memcg_wq, work);
2242}
2243
2244/*
2245 * Drain all per-CPU charge caches for the given root_memcg and the
2246 * subtree of the hierarchy under it.
2247 */
2248void drain_all_stock(struct mem_cgroup *root_memcg)
2249{
2250 int cpu, curcpu;
2251
2252	/* If someone's already draining, avoid adding more workers. */
2253 if (!mutex_trylock(&percpu_charge_mutex))
2254 return;
2255 /*
2256 * Notify other cpus that system-wide "drain" is running
2257 * We do not care about races with the cpu hotplug because cpu down
2258 * as well as workers from this path always operate on the local
2259 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2260 */
2261 migrate_disable();
2262 curcpu = smp_processor_id();
2263 for_each_online_cpu(cpu) {
2264 struct memcg_stock_pcp *memcg_st = &per_cpu(memcg_stock, cpu);
2265 struct obj_stock_pcp *obj_st = &per_cpu(obj_stock, cpu);
2266
2267 if (!test_bit(FLUSHING_CACHED_CHARGE, &memcg_st->flags) &&
2268 is_memcg_drain_needed(memcg_st, root_memcg) &&
2269 !test_and_set_bit(FLUSHING_CACHED_CHARGE,
2270 &memcg_st->flags)) {
2271 if (cpu == curcpu)
2272 drain_local_memcg_stock(&memcg_st->work);
2273 else
2274 schedule_drain_work(cpu, &memcg_st->work);
2275 }
2276
2277 if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) &&
2278 obj_stock_flush_required(obj_st, root_memcg) &&
2279 !test_and_set_bit(FLUSHING_CACHED_CHARGE,
2280 &obj_st->flags)) {
2281 if (cpu == curcpu)
2282 drain_local_obj_stock(&obj_st->work);
2283 else
2284 schedule_drain_work(cpu, &obj_st->work);
2285 }
2286 }
2287 migrate_enable();
2288 mutex_unlock(&percpu_charge_mutex);
2289}
2290
2291static int memcg_hotplug_cpu_dead(unsigned int cpu)
2292{
2293 /* no need for the local lock */
2294 drain_obj_stock(&per_cpu(obj_stock, cpu));
2295 drain_stock_fully(&per_cpu(memcg_stock, cpu));
2296
2297 return 0;
2298}
2299
2300static unsigned long reclaim_high(struct mem_cgroup *memcg,
2301 unsigned int nr_pages,
2302 gfp_t gfp_mask)
2303{
2304 unsigned long nr_reclaimed = 0;
2305
2306 do {
2307 unsigned long pflags;
2308
2309 if (page_counter_read(&memcg->memory) <=
2310 READ_ONCE(memcg->memory.high))
2311 continue;
2312
2313 memcg_memory_event(memcg, MEMCG_HIGH);
2314
2315 psi_memstall_enter(&pflags);
2316 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2317 gfp_mask,
2318 MEMCG_RECLAIM_MAY_SWAP,
2319 NULL);
2320 psi_memstall_leave(&pflags);
2321 } while ((memcg = parent_mem_cgroup(memcg)) &&
2322 !mem_cgroup_is_root(memcg));
2323
2324 return nr_reclaimed;
2325}
2326
2327static void high_work_func(struct work_struct *work)
2328{
2329 struct mem_cgroup *memcg;
2330
2331 memcg = container_of(work, struct mem_cgroup, high_work);
2332 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2333}
2334
2335/*
2336 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2337 * enough to cause a significant slowdown in most cases, while still
2338 * allowing diagnostics and tracing to proceed without becoming stuck.
2339 */
2340#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2341
2342/*
2343 * When calculating the delay, we use these on either side of the exponentiation
2344 * to maintain precision and scale to a reasonable number of jiffies (see the
2345 * table below).
2346 *
2347 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2348 * overage ratio to a delay.
2349 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2350 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2351 * to produce a reasonable delay curve.
2352 *
2353 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2354 * reasonable delay curve compared to precision-adjusted overage, not
2355 * penalising heavily at first, but still making sure that growth beyond the
2356 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2357 * example, with a high of 100 megabytes:
2358 *
2359 * +-------+------------------------+
2360 * | usage | time to allocate in ms |
2361 * +-------+------------------------+
2362 * | 100M | 0 |
2363 * | 101M | 6 |
2364 * | 102M | 25 |
2365 * | 103M | 57 |
2366 * | 104M | 102 |
2367 * | 105M | 159 |
2368 * | 106M | 230 |
2369 * | 107M | 313 |
2370 * | 108M | 409 |
2371 * | 109M | 518 |
2372 * | 110M | 639 |
2373 * | 111M | 774 |
2374 * | 112M | 921 |
2375 * | 113M | 1081 |
2376 * | 114M | 1254 |
2377 * | 115M | 1439 |
2378 * | 116M | 1638 |
2379 * | 117M | 1849 |
2380 * | 118M | 2000 |
2381 * | 119M | 2000 |
2382 * | 120M | 2000 |
2383 * +-------+------------------------+
2384 */
2385#define MEMCG_DELAY_PRECISION_SHIFT 20
2386#define MEMCG_DELAY_SCALING_SHIFT 14
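
/*
 * Worked example for one row of the table above (assuming HZ=1000 and a
 * full MEMCG_CHARGE_BATCH sized charge): at usage 104M with high 100M,
 * calculate_overage() returns (4M / 100M) << 20 ~= 41943, so
 * calculate_high_delay() yields 41943 * 41943 * HZ >> 20 >> 14 ~= 102
 * jiffies, i.e. the 102ms shown in the table.
 */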
2387
2388static u64 calculate_overage(unsigned long usage, unsigned long high)
2389{
2390 u64 overage;
2391
2392 if (usage <= high)
2393 return 0;
2394
2395 /*
2396 * Prevent division by 0 in overage calculation by acting as if
2397 * it was a threshold of 1 page
2398 */
2399 high = max(high, 1UL);
2400
2401 overage = usage - high;
2402 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2403 return div64_u64(overage, high);
2404}
2405
2406static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2407{
2408 u64 overage, max_overage = 0;
2409
2410 do {
2411 overage = calculate_overage(page_counter_read(&memcg->memory),
2412 READ_ONCE(memcg->memory.high));
2413 max_overage = max(overage, max_overage);
2414 } while ((memcg = parent_mem_cgroup(memcg)) &&
2415 !mem_cgroup_is_root(memcg));
2416
2417 return max_overage;
2418}
2419
2420static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2421{
2422 u64 overage, max_overage = 0;
2423
2424 do {
2425 overage = calculate_overage(page_counter_read(&memcg->swap),
2426 READ_ONCE(memcg->swap.high));
2427 if (overage)
2428 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2429 max_overage = max(overage, max_overage);
2430 } while ((memcg = parent_mem_cgroup(memcg)) &&
2431 !mem_cgroup_is_root(memcg));
2432
2433 return max_overage;
2434}
2435
2436/*
2437 * Get the number of jiffies that we should penalise a mischievous cgroup which
2438 * is exceeding its memory.high by checking both it and its ancestors.
2439 */
2440static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2441 unsigned int nr_pages,
2442 u64 max_overage)
2443{
2444 unsigned long penalty_jiffies;
2445
2446 if (!max_overage)
2447 return 0;
2448
2449 /*
2450 * We use overage compared to memory.high to calculate the number of
2451 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2452 * fairly lenient on small overages, and increasingly harsh when the
2453 * memcg in question makes it clear that it has no intention of stopping
2454 * its crazy behaviour, so we exponentially increase the delay based on
2455 * overage amount.
2456 */
2457 penalty_jiffies = max_overage * max_overage * HZ;
2458 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2459 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2460
2461 /*
2462 * Factor in the task's own contribution to the overage, such that four
2463 * N-sized allocations are throttled approximately the same as one
2464 * 4N-sized allocation.
2465 *
2466 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2467	 * larger the current charge batch is than that.
2468 */
2469 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2470}
2471
2472/*
2473 * Reclaims memory over the high limit. Called directly from
2474 * try_charge() (context permitting), as well as from the userland
2475 * return path where reclaim is always able to block.
2476 */
2477void __mem_cgroup_handle_over_high(gfp_t gfp_mask)
2478{
2479 unsigned long penalty_jiffies;
2480 unsigned long pflags;
2481 unsigned long nr_reclaimed;
2482 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2483 int nr_retries = MAX_RECLAIM_RETRIES;
2484 struct mem_cgroup *memcg;
2485 bool in_retry = false;
2486
2487 memcg = get_mem_cgroup_from_mm(current->mm);
2488 current->memcg_nr_pages_over_high = 0;
2489
2490retry_reclaim:
2491 /*
2492 * Bail if the task is already exiting. Unlike memory.max,
2493 * memory.high enforcement isn't as strict, and there is no
2494 * OOM killer involved, which means the excess could already
2495 * be much bigger (and still growing) than it could for
2496 * memory.max; the dying task could get stuck in fruitless
2497 * reclaim for a long time, which isn't desirable.
2498 */
2499 if (task_is_dying())
2500 goto out;
2501
2502 /*
2503 * The allocating task should reclaim at least the batch size, but for
2504 * subsequent retries we only want to do what's necessary to prevent oom
2505 * or breaching resource isolation.
2506 *
2507 * This is distinct from memory.max or page allocator behaviour because
2508 * memory.high is currently batched, whereas memory.max and the page
2509 * allocator run every time an allocation is made.
2510 */
2511 nr_reclaimed = reclaim_high(memcg,
2512 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2513 gfp_mask);
2514
2515 /*
2516 * memory.high is breached and reclaim is unable to keep up. Throttle
2517 * allocators proactively to slow down excessive growth.
2518 */
2519 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2520 mem_find_max_overage(memcg));
2521
2522 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2523 swap_find_max_overage(memcg));
2524
2525 /*
2526 * Clamp the max delay per usermode return so as to still keep the
2527 * application moving forwards and also permit diagnostics, albeit
2528 * extremely slowly.
2529 */
2530 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2531
2532 /*
2533 * Don't sleep if the amount of jiffies this memcg owes us is so low
2534 * that it's not even worth doing, in an attempt to be nice to those who
2535 * go only a small amount over their memory.high value and maybe haven't
2536 * been aggressively reclaimed enough yet.
2537 */
2538 if (penalty_jiffies <= HZ / 100)
2539 goto out;
2540
2541 /*
2542 * If reclaim is making forward progress but we're still over
2543 * memory.high, we want to encourage that rather than doing allocator
2544 * throttling.
2545 */
2546 if (nr_reclaimed || nr_retries--) {
2547 in_retry = true;
2548 goto retry_reclaim;
2549 }
2550
2551 /*
2552 * Reclaim didn't manage to push usage below the limit, slow
2553 * this allocating task down.
2554 *
2555 * If we exit early, we're guaranteed to die (since
2556 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2557 * need to account for any ill-begotten jiffies to pay them off later.
2558 */
2559 psi_memstall_enter(&pflags);
2560 schedule_timeout_killable(penalty_jiffies);
2561 psi_memstall_leave(&pflags);
2562
2563out:
2564 css_put(&memcg->css);
2565}
2566
2567static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2568 unsigned int nr_pages)
2569{
2570 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2571 int nr_retries = MAX_RECLAIM_RETRIES;
2572 struct mem_cgroup *mem_over_limit;
2573 struct page_counter *counter;
2574 unsigned long nr_reclaimed;
2575 bool passed_oom = false;
2576 unsigned int reclaim_options;
2577 bool drained = false;
2578 bool raised_max_event = false;
2579 unsigned long pflags;
2580 bool allow_spinning = gfpflags_allow_spinning(gfp_mask);
2581
2582retry:
2583 if (consume_stock(memcg, nr_pages))
2584 return 0;
2585
2586 if (!allow_spinning)
2587 /* Avoid the refill and flush of the older stock */
2588 batch = nr_pages;
2589
2590 reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2591 if (!do_memsw_account() ||
2592 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2593 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2594 goto done_restock;
2595 if (do_memsw_account())
2596 page_counter_uncharge(&memcg->memsw, batch);
2597 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2598 } else {
2599 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2600 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2601 }
2602
2603 if (batch > nr_pages) {
2604 batch = nr_pages;
2605 goto retry;
2606 }
2607
2608 /*
2609 * Prevent unbounded recursion when reclaim operations need to
2610 * allocate memory. This might exceed the limits temporarily,
2611 * but we prefer facilitating memory reclaim and getting back
2612 * under the limit over triggering OOM kills in these cases.
2613 */
2614 if (unlikely(current->flags & PF_MEMALLOC))
2615 goto force;
2616
2617 if (unlikely(task_in_memcg_oom(current)))
2618 goto nomem;
2619
2620 if (!gfpflags_allow_blocking(gfp_mask))
2621 goto nomem;
2622
2623 __memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
2624 raised_max_event = true;
2625
2626 psi_memstall_enter(&pflags);
2627 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2628 gfp_mask, reclaim_options, NULL);
2629 psi_memstall_leave(&pflags);
2630
2631 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2632 goto retry;
2633
2634 if (!drained) {
2635 drain_all_stock(mem_over_limit);
2636 drained = true;
2637 goto retry;
2638 }
2639
2640 if (gfp_mask & __GFP_NORETRY)
2641 goto nomem;
2642 /*
2643 * Even though the limit is exceeded at this point, reclaim
2644 * may have been able to free some pages. Retry the charge
2645 * before killing the task.
2646 *
2647 * Only for regular pages, though: huge pages are rather
2648 * unlikely to succeed so close to the limit, and we fall back
2649 * to regular pages anyway in case of failure.
2650 */
2651 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2652 goto retry;
2653
2654 if (nr_retries--)
2655 goto retry;
2656
2657 if (gfp_mask & __GFP_RETRY_MAYFAIL)
2658 goto nomem;
2659
2660 /* Avoid endless loop for tasks bypassed by the oom killer */
2661 if (passed_oom && task_is_dying())
2662 goto nomem;
2663
2664 /*
2665	 * Keep retrying as long as the memcg oom killer is able to make
2666	 * forward progress, or bypass the charge if the oom killer
2667 * couldn't make any progress.
2668 */
2669 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2670 get_order(nr_pages * PAGE_SIZE))) {
2671 passed_oom = true;
2672 nr_retries = MAX_RECLAIM_RETRIES;
2673 goto retry;
2674 }
2675nomem:
2676 /*
2677 * Memcg doesn't have a dedicated reserve for atomic
2678 * allocations. But like the global atomic pool, we need to
2679 * put the burden of reclaim on regular allocation requests
2680 * and let these go through as privileged allocations.
2681 */
2682 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2683 return -ENOMEM;
2684force:
2685 /*
2686 * If the allocation has to be enforced, don't forget to raise
2687 * a MEMCG_MAX event.
2688 */
2689 if (!raised_max_event)
2690 __memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
2691
2692 /*
2693 * The allocation either can't fail or will lead to more memory
2694	 * being freed very soon. Allow memory usage to go over the limit
2695 * temporarily by force charging it.
2696 */
2697 page_counter_charge(&memcg->memory, nr_pages);
2698 if (do_memsw_account())
2699 page_counter_charge(&memcg->memsw, nr_pages);
2700
2701 return 0;
2702
2703done_restock:
2704 if (batch > nr_pages)
2705 refill_stock(memcg, batch - nr_pages);
2706
2707 /*
2708 * If the hierarchy is above the normal consumption range, schedule
2709 * reclaim on returning to userland. We can perform reclaim here
2710 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2711 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2712 * not recorded as it most likely matches current's and won't
2713 * change in the meantime. As high limit is checked again before
2714 * reclaim, the cost of mismatch is negligible.
2715 */
2716 do {
2717 bool mem_high, swap_high;
2718
2719 mem_high = page_counter_read(&memcg->memory) >
2720 READ_ONCE(memcg->memory.high);
2721 swap_high = page_counter_read(&memcg->swap) >
2722 READ_ONCE(memcg->swap.high);
2723
2724 /* Don't bother a random interrupted task */
2725 if (!in_task()) {
2726 if (mem_high) {
2727 schedule_work(&memcg->high_work);
2728 break;
2729 }
2730 continue;
2731 }
2732
2733 if (mem_high || swap_high) {
2734 /*
2735 * The allocating tasks in this cgroup will need to do
2736 * reclaim or be throttled to prevent further growth
2737 * of the memory or swap footprints.
2738 *
2739 * Target some best-effort fairness between the tasks,
2740 * and distribute reclaim work and delay penalties
2741 * based on how much each task is actually allocating.
2742 */
2743 current->memcg_nr_pages_over_high += batch;
2744 set_notify_resume(current);
2745 break;
2746 }
2747 } while ((memcg = parent_mem_cgroup(memcg)));
2748
2749 /*
2750 * Reclaim is set up above to be called from the userland
2751 * return path. But also attempt synchronous reclaim to avoid
2752 * excessive overrun while the task is still inside the
2753 * kernel. If this is successful, the return path will see it
2754 * when it rechecks the overage and simply bail out.
2755 */
2756 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2757 !(current->flags & PF_MEMALLOC) &&
2758 gfpflags_allow_blocking(gfp_mask))
2759 __mem_cgroup_handle_over_high(gfp_mask);
2760 return 0;
2761}
2762
2763static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2764 unsigned int nr_pages)
2765{
2766 if (mem_cgroup_is_root(memcg))
2767 return 0;
2768
2769 return try_charge_memcg(memcg, gfp_mask, nr_pages);
2770}
2771
2772static void commit_charge(struct folio *folio, struct obj_cgroup *objcg)
2773{
2774 VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
2775 /*
2776 * Any of the following ensures folio's objcg stability:
2777 *
2778 * - the page lock
2779 * - LRU isolation
2780 * - exclusive reference
2781 */
2782 folio->memcg_data = (unsigned long)objcg;
2783}
2784
2785#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
2786static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
2787 struct pglist_data *pgdat,
2788 enum node_stat_item idx, int nr)
2789{
2790 struct lruvec *lruvec;
2791
2792 if (likely(!in_nmi())) {
2793 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2794 mod_memcg_lruvec_state(lruvec, idx, nr);
2795 } else {
2796 struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id];
2797
2798 /* preemption is disabled in_nmi(). */
2799 css_rstat_updated(&memcg->css, smp_processor_id());
2800 if (idx == NR_SLAB_RECLAIMABLE_B)
2801 atomic_add(nr, &pn->slab_reclaimable);
2802 else
2803 atomic_add(nr, &pn->slab_unreclaimable);
2804 }
2805}
2806#else
2807static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
2808 struct pglist_data *pgdat,
2809 enum node_stat_item idx, int nr)
2810{
2811 struct lruvec *lruvec;
2812
2813 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2814 mod_memcg_lruvec_state(lruvec, idx, nr);
2815}
2816#endif
2817
2818static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2819 struct pglist_data *pgdat,
2820 enum node_stat_item idx, int nr)
2821{
2822 struct mem_cgroup *memcg;
2823
2824 rcu_read_lock();
2825 memcg = obj_cgroup_memcg(objcg);
2826 account_slab_nmi_safe(memcg, pgdat, idx, nr);
2827 rcu_read_unlock();
2828}
2829
2830static __always_inline
2831struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
2832{
2833 /*
2834 * Slab objects are accounted individually, not per-page.
2835 * Memcg membership data for each individual object is saved in
2836 * slab->obj_exts.
2837 */
2838 unsigned long obj_exts;
2839 struct slabobj_ext *obj_ext;
2840 unsigned int off;
2841
2842 obj_exts = slab_obj_exts(slab);
2843 if (!obj_exts)
2844 return NULL;
2845
2846 get_slab_obj_exts(obj_exts);
2847 off = obj_to_index(slab->slab_cache, slab, p);
2848 obj_ext = slab_obj_ext(slab, obj_exts, off);
2849 if (obj_ext->objcg) {
2850 struct obj_cgroup *objcg = obj_ext->objcg;
2851
2852 put_slab_obj_exts(obj_exts);
2853 return obj_cgroup_memcg(objcg);
2854 }
2855 put_slab_obj_exts(obj_exts);
2856
2857 return NULL;
2858}
2859
2860/*
2861 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2862 * It is not suitable for objects allocated using vmalloc().
2863 *
2864 * A passed kernel object must be a slab object or a generic kernel page.
2865 *
2866 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2867 * cgroup_mutex, etc.
2868 */
2869struct mem_cgroup *mem_cgroup_from_virt(void *p)
2870{
2871 struct slab *slab;
2872
2873 if (mem_cgroup_disabled())
2874 return NULL;
2875
2876 slab = virt_to_slab(p);
2877 if (slab)
2878 return mem_cgroup_from_obj_slab(slab, p);
2879 return folio_memcg_check(virt_to_folio(p));
2880}
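
/*
 * Illustrative usage sketch (hypothetical caller): the lookup itself is
 * lockless, but the returned memcg is only stable while one of the
 * protections mentioned above is held, e.g.:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_virt(p);
 *	if (memcg)
 *		... inspect memcg ...
 *	rcu_read_unlock();
 */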
2881
2882static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2883{
2884 int nid = numa_node_id();
2885
2886 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2887 struct obj_cgroup *objcg = rcu_dereference(memcg->nodeinfo[nid]->objcg);
2888
2889 if (likely(objcg && obj_cgroup_tryget(objcg)))
2890 return objcg;
2891 }
2892
2893 return NULL;
2894}
2895
2896static inline struct obj_cgroup *get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2897{
2898 struct obj_cgroup *objcg;
2899
2900 rcu_read_lock();
2901 objcg = __get_obj_cgroup_from_memcg(memcg);
2902 rcu_read_unlock();
2903
2904 return objcg;
2905}
2906
2907static struct obj_cgroup *current_objcg_update(void)
2908{
2909 struct mem_cgroup *memcg;
2910 struct obj_cgroup *old, *objcg = NULL;
2911
2912 do {
2913 /* Atomically drop the update bit. */
2914 old = xchg(¤t->objcg, NULL);
2915 if (old) {
2916 old = (struct obj_cgroup *)
2917 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2918 obj_cgroup_put(old);
2919
2920 old = NULL;
2921 }
2922
2923 /* If new objcg is NULL, no reason for the second atomic update. */
2924 if (!current->mm || (current->flags & PF_KTHREAD))
2925 return NULL;
2926
2927 /*
2928 * Release the objcg pointer from the previous iteration,
2929		 * if try_cmpxchg() below fails.
2930 */
2931 if (unlikely(objcg)) {
2932 obj_cgroup_put(objcg);
2933 objcg = NULL;
2934 }
2935
2936 /*
2937 * Obtain the new objcg pointer. The current task can be
2938 * asynchronously moved to another memcg and the previous
2939 * memcg can be offlined. So let's get the memcg pointer
2940 * and try get a reference to objcg under a rcu read lock.
2941 */
2942
2943 rcu_read_lock();
2944 memcg = mem_cgroup_from_task(current);
2945 objcg = __get_obj_cgroup_from_memcg(memcg);
2946 rcu_read_unlock();
2947
2948 /*
2949		 * Try to set up a new objcg pointer atomically. If it
2950 * fails, it means the update flag was set concurrently, so
2951 * the whole procedure should be repeated.
2952 */
2953 } while (!try_cmpxchg(¤t->objcg, &old, objcg));
2954
2955 return objcg;
2956}
2957
2958__always_inline struct obj_cgroup *current_obj_cgroup(void)
2959{
2960 struct mem_cgroup *memcg;
2961 struct obj_cgroup *objcg;
2962 int nid = numa_node_id();
2963
2964 if (IS_ENABLED(CONFIG_MEMCG_NMI_UNSAFE) && in_nmi())
2965 return NULL;
2966
2967 if (in_task()) {
2968 memcg = current->active_memcg;
2969 if (unlikely(memcg))
2970 goto from_memcg;
2971
2972 objcg = READ_ONCE(current->objcg);
2973 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2974 objcg = current_objcg_update();
2975 /*
2976		 * The objcg reference is kept by the task, so it's safe
2977		 * for the current task to use the objcg.
2978 */
2979 return objcg ? : rcu_dereference_check(root_mem_cgroup->nodeinfo[nid]->objcg, 1);
2980 }
2981
2982 memcg = this_cpu_read(int_active_memcg);
2983 if (unlikely(memcg))
2984 goto from_memcg;
2985
2986 return rcu_dereference_check(root_mem_cgroup->nodeinfo[nid]->objcg, 1);
2987
2988from_memcg:
2989 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2990 /*
2991 * Memcg pointer is protected by scope (see set_active_memcg())
2992 * and is pinning the corresponding objcg, so objcg can't go
2993 * away and can be used within the scope without any additional
2994 * protection.
2995 */
2996 objcg = rcu_dereference_check(memcg->nodeinfo[nid]->objcg, 1);
2997 if (likely(objcg))
2998 return objcg;
2999 }
3000
3001 return rcu_dereference_check(root_mem_cgroup->nodeinfo[nid]->objcg, 1);
3002}
3003
3004struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3005{
3006 struct obj_cgroup *objcg;
3007
3008 objcg = folio_objcg(folio);
3009 if (objcg)
3010 obj_cgroup_get(objcg);
3011
3012 return objcg;
3013}
3014
3015#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
3016static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
3017{
3018 if (likely(!in_nmi())) {
3019 mod_memcg_state(memcg, MEMCG_KMEM, val);
3020 } else {
3021 /* preemption is disabled in_nmi(). */
3022 css_rstat_updated(&memcg->css, smp_processor_id());
3023 atomic_add(val, &memcg->kmem_stat);
3024 }
3025}
3026#else
3027static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
3028{
3029 mod_memcg_state(memcg, MEMCG_KMEM, val);
3030}
3031#endif
3032
3033/*
3034 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3035 * @objcg: object cgroup to uncharge
3036 * @nr_pages: number of pages to uncharge
3037 */
3038static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3039 unsigned int nr_pages)
3040{
3041 struct mem_cgroup *memcg;
3042
3043 memcg = get_mem_cgroup_from_objcg(objcg);
3044
3045 account_kmem_nmi_safe(memcg, -nr_pages);
3046 memcg1_account_kmem(memcg, -nr_pages);
3047 if (!mem_cgroup_is_root(memcg))
3048 refill_stock(memcg, nr_pages);
3049
3050 css_put(&memcg->css);
3051}
3052
3053/*
3054 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3055 * @objcg: object cgroup to charge
3056 * @gfp: reclaim mode
3057 * @nr_pages: number of pages to charge
3058 *
3059 * Returns 0 on success, an error code on failure.
3060 */
3061static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3062 unsigned int nr_pages)
3063{
3064 struct mem_cgroup *memcg;
3065 int ret;
3066
3067 memcg = get_mem_cgroup_from_objcg(objcg);
3068
3069 ret = try_charge_memcg(memcg, gfp, nr_pages);
3070 if (ret)
3071 goto out;
3072
3073 account_kmem_nmi_safe(memcg, nr_pages);
3074 memcg1_account_kmem(memcg, nr_pages);
3075out:
3076 css_put(&memcg->css);
3077
3078 return ret;
3079}
3080
3081static struct obj_cgroup *page_objcg(const struct page *page)
3082{
3083 unsigned long memcg_data = page->memcg_data;
3084
3085 if (mem_cgroup_disabled() || !memcg_data)
3086 return NULL;
3087
3088 VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM,
3089 page);
3090 return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM);
3091}
3092
3093static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg)
3094{
3095 page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM;
3096}
3097
3098/**
3099 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3100 * @page: page to charge
3101 * @gfp: reclaim mode
3102 * @order: allocation order
3103 *
3104 * Returns 0 on success, an error code on failure.
3105 */
3106int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3107{
3108 struct obj_cgroup *objcg;
3109 int ret = 0;
3110
3111 objcg = current_obj_cgroup();
3112 if (objcg && !obj_cgroup_is_root(objcg)) {
3113 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3114 if (!ret) {
3115 obj_cgroup_get(objcg);
3116 page_set_objcg(page, objcg);
3117 return 0;
3118 }
3119 }
3120 return ret;
3121}
3122
3123/**
3124 * __memcg_kmem_uncharge_page: uncharge a kmem page
3125 * @page: page to uncharge
3126 * @order: allocation order
3127 */
3128void __memcg_kmem_uncharge_page(struct page *page, int order)
3129{
3130 struct obj_cgroup *objcg = page_objcg(page);
3131 unsigned int nr_pages = 1 << order;
3132
3133 if (!objcg)
3134 return;
3135
3136 obj_cgroup_uncharge_pages(objcg, nr_pages);
3137 page->memcg_data = 0;
3138 obj_cgroup_put(objcg);
3139}
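
/*
 * Illustrative charge/uncharge pairing (hypothetical caller; in practice
 * these are reached through wrappers that check __GFP_ACCOUNT first):
 *
 *	if (__memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	__memcg_kmem_uncharge_page(page, order);	(at free time)
 */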
3140
3141static struct obj_stock_pcp *trylock_stock(void)
3142{
3143 if (local_trylock(&obj_stock.lock))
3144 return this_cpu_ptr(&obj_stock);
3145
3146 return NULL;
3147}
3148
3149static void unlock_stock(struct obj_stock_pcp *stock)
3150{
3151 if (stock)
3152 local_unlock(&obj_stock.lock);
3153}
3154
3155/* Call after __refill_obj_stock() to ensure stock->cached_objcg == objcg */
3156static void __account_obj_stock(struct obj_cgroup *objcg,
3157 struct obj_stock_pcp *stock, int nr,
3158 struct pglist_data *pgdat, enum node_stat_item idx)
3159{
3160 int *bytes;
3161
3162 if (!stock || READ_ONCE(stock->cached_objcg) != objcg)
3163 goto direct;
3164
3165 /*
3166 * Save vmstat data in stock and skip vmstat array update unless
3167 * accumulating over a page of vmstat data or when pgdat changes.
3168 */
3169 if (stock->cached_pgdat != pgdat) {
3170 /* Flush the existing cached vmstat data */
3171 struct pglist_data *oldpg = stock->cached_pgdat;
3172
3173 if (stock->nr_slab_reclaimable_b) {
3174 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3175 stock->nr_slab_reclaimable_b);
3176 stock->nr_slab_reclaimable_b = 0;
3177 }
3178 if (stock->nr_slab_unreclaimable_b) {
3179 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3180 stock->nr_slab_unreclaimable_b);
3181 stock->nr_slab_unreclaimable_b = 0;
3182 }
3183 stock->cached_pgdat = pgdat;
3184 }
3185
3186 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3187 : &stock->nr_slab_unreclaimable_b;
3188 /*
3189	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
3190 * cached locally at least once before pushing it out.
3191 */
3192 if (!*bytes) {
3193 *bytes = nr;
3194 nr = 0;
3195 } else {
3196 *bytes += nr;
3197 if (abs(*bytes) > PAGE_SIZE) {
3198 nr = *bytes;
3199 *bytes = 0;
3200 } else {
3201 nr = 0;
3202 }
3203 }
3204direct:
3205 if (nr)
3206 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3207}
3208
3209static bool __consume_obj_stock(struct obj_cgroup *objcg,
3210 struct obj_stock_pcp *stock,
3211 unsigned int nr_bytes)
3212{
3213 if (objcg == READ_ONCE(stock->cached_objcg) &&
3214 stock->nr_bytes >= nr_bytes) {
3215 stock->nr_bytes -= nr_bytes;
3216 return true;
3217 }
3218
3219 return false;
3220}
3221
3222static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3223{
3224 struct obj_stock_pcp *stock;
3225 bool ret = false;
3226
3227 stock = trylock_stock();
3228 if (!stock)
3229 return ret;
3230
3231 ret = __consume_obj_stock(objcg, stock, nr_bytes);
3232 unlock_stock(stock);
3233
3234 return ret;
3235}
3236
3237static void drain_obj_stock(struct obj_stock_pcp *stock)
3238{
3239 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3240
3241 if (!old)
3242 return;
3243
3244 if (stock->nr_bytes) {
3245 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3246 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3247
3248 if (nr_pages) {
3249 struct mem_cgroup *memcg;
3250
3251 memcg = get_mem_cgroup_from_objcg(old);
3252
3253 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
3254 memcg1_account_kmem(memcg, -nr_pages);
3255 if (!mem_cgroup_is_root(memcg))
3256 memcg_uncharge(memcg, nr_pages);
3257
3258 css_put(&memcg->css);
3259 }
3260
3261 /*
3262 * The leftover is flushed to the centralized per-memcg value.
3263 * On the next attempt to refill obj stock it will be moved
3264		 * to a per-cpu stock (probably on another CPU), see
3265 * refill_obj_stock().
3266 *
3267 * How often it's flushed is a trade-off between the memory
3268 * limit enforcement accuracy and potential CPU contention,
3269 * so it might be changed in the future.
3270 */
3271 atomic_add(nr_bytes, &old->nr_charged_bytes);
3272 stock->nr_bytes = 0;
3273 }
3274
3275 /*
3276 * Flush the vmstat data in current stock
3277 */
3278 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3279 if (stock->nr_slab_reclaimable_b) {
3280 mod_objcg_mlstate(old, stock->cached_pgdat,
3281 NR_SLAB_RECLAIMABLE_B,
3282 stock->nr_slab_reclaimable_b);
3283 stock->nr_slab_reclaimable_b = 0;
3284 }
3285 if (stock->nr_slab_unreclaimable_b) {
3286 mod_objcg_mlstate(old, stock->cached_pgdat,
3287 NR_SLAB_UNRECLAIMABLE_B,
3288 stock->nr_slab_unreclaimable_b);
3289 stock->nr_slab_unreclaimable_b = 0;
3290 }
3291 stock->cached_pgdat = NULL;
3292 }
3293
3294 WRITE_ONCE(stock->cached_objcg, NULL);
3295 obj_cgroup_put(old);
3296}
3297
3298static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
3299 struct mem_cgroup *root_memcg)
3300{
3301 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3302 struct mem_cgroup *memcg;
3303 bool flush = false;
3304
3305 rcu_read_lock();
3306 if (objcg) {
3307 memcg = obj_cgroup_memcg(objcg);
3308 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3309 flush = true;
3310 }
3311 rcu_read_unlock();
3312
3313 return flush;
3314}
3315
3316static void __refill_obj_stock(struct obj_cgroup *objcg,
3317 struct obj_stock_pcp *stock,
3318 unsigned int nr_bytes,
3319 bool allow_uncharge)
3320{
3321 unsigned int nr_pages = 0;
3322
3323 if (!stock) {
3324 nr_pages = nr_bytes >> PAGE_SHIFT;
3325 nr_bytes = nr_bytes & (PAGE_SIZE - 1);
3326 atomic_add(nr_bytes, &objcg->nr_charged_bytes);
3327 goto out;
3328 }
3329
3330 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3331 drain_obj_stock(stock);
3332 obj_cgroup_get(objcg);
3333 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3334 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3335 WRITE_ONCE(stock->cached_objcg, objcg);
3336
3337 allow_uncharge = true; /* Allow uncharge when objcg changes */
3338 }
3339 stock->nr_bytes += nr_bytes;
3340
3341 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3342 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3343 stock->nr_bytes &= (PAGE_SIZE - 1);
3344 }
3345
3346out:
3347 if (nr_pages)
3348 obj_cgroup_uncharge_pages(objcg, nr_pages);
3349}
3350
3351static void refill_obj_stock(struct obj_cgroup *objcg,
3352 unsigned int nr_bytes,
3353 bool allow_uncharge)
3354{
3355 struct obj_stock_pcp *stock = trylock_stock();
3356 __refill_obj_stock(objcg, stock, nr_bytes, allow_uncharge);
3357 unlock_stock(stock);
3358}
3359
3360static int __obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp,
3361 size_t size, size_t *remainder)
3362{
3363 size_t charge_size;
3364 int ret;
3365
3366 charge_size = PAGE_ALIGN(size);
3367 ret = obj_cgroup_charge_pages(objcg, gfp, charge_size >> PAGE_SHIFT);
3368 if (!ret)
3369 *remainder = charge_size - size;
3370
3371 return ret;
3372}
3373
3374int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3375{
3376 size_t remainder;
3377 int ret;
3378
3379 if (likely(consume_obj_stock(objcg, size)))
3380 return 0;
3381
3382 /*
3383 * In theory, objcg->nr_charged_bytes can have enough
3384 * pre-charged bytes to satisfy the allocation. However,
3385 * flushing objcg->nr_charged_bytes requires two atomic
3386 * operations, and objcg->nr_charged_bytes can't be big.
3387 * The shared objcg->nr_charged_bytes can also become a
3388 * performance bottleneck if all tasks of the same memcg are
3389 * trying to update it. So it's better to ignore it and try
3390 * grab some new pages. The stock's nr_bytes will be flushed to
3391 * objcg->nr_charged_bytes later on when objcg changes.
3392 *
3393 * The stock's nr_bytes may contain enough pre-charged bytes
3394	 * to allow one less page to be charged, but we can't rely
3395 * on the pre-charged bytes not being changed outside of
3396 * consume_obj_stock() or refill_obj_stock(). So ignore those
3397 * pre-charged bytes as well when charging pages. To avoid a
3398 * page uncharge right after a page charge, we set the
3399 * allow_uncharge flag to false when calling refill_obj_stock()
3400 * to temporarily allow the pre-charged bytes to exceed the page
3401 * size limit. The maximum reachable value of the pre-charged
3402 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3403 * race.
3404 */
3405 ret = __obj_cgroup_charge(objcg, gfp, size, &remainder);
3406 if (!ret && remainder)
3407 refill_obj_stock(objcg, remainder, false);
3408
3409 return ret;
3410}
3411
3412void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3413{
3414 refill_obj_stock(objcg, size, true);
3415}
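
/*
 * Illustrative byte-level pairing (hypothetical caller): an accounted
 * sub-page allocation charges its size up front and returns the same
 * number of bytes on free, e.g.:
 *
 *	objcg = current_obj_cgroup();
 *	if (objcg) {
 *		obj_cgroup_get(objcg);
 *		if (obj_cgroup_charge(objcg, gfp, size))
 *			... fail the allocation ...
 *	}
 *	...
 *	obj_cgroup_uncharge(objcg, size);
 *	obj_cgroup_put(objcg);
 */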
3416
3417static inline size_t obj_full_size(struct kmem_cache *s)
3418{
3419 /*
3420 * For each accounted object there is an extra space which is used
3421 * to store obj_cgroup membership. Charge it too.
3422 */
3423 return s->size + sizeof(struct obj_cgroup *);
3424}
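
/*
 * For example (assuming 64-bit pointers), a cache with a 64-byte object
 * size charges obj_full_size() = 64 + 8 = 72 bytes per accounted object.
 */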
3425
3426bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
3427 gfp_t flags, size_t size, void **p)
3428{
3429 size_t obj_size = obj_full_size(s);
3430 struct obj_cgroup *objcg;
3431 struct slab *slab;
3432 unsigned long off;
3433 size_t i;
3434
3435 /*
3436 * The obtained objcg pointer is safe to use within the current scope,
3437 * defined by current task or set_active_memcg() pair.
3438 * obj_cgroup_get() is used to get a permanent reference.
3439 */
3440 objcg = current_obj_cgroup();
3441 if (!objcg || obj_cgroup_is_root(objcg))
3442 return true;
3443
3444 /*
3445 * slab_alloc_node() avoids the NULL check, so we might be called with a
3446 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
3447 * the whole requested size.
3448	 * Return success as there's nothing to free back.
3449 */
3450 if (unlikely(*p == NULL))
3451 return true;
3452
3453 flags &= gfp_allowed_mask;
3454
3455 if (lru) {
3456 int ret;
3457 struct mem_cgroup *memcg;
3458
3459 memcg = get_mem_cgroup_from_objcg(objcg);
3460 ret = memcg_list_lru_alloc(memcg, lru, flags);
3461 css_put(&memcg->css);
3462
3463 if (ret)
3464 return false;
3465 }
3466
3467 for (i = 0; i < size; i++) {
3468 unsigned long obj_exts;
3469 struct slabobj_ext *obj_ext;
3470 struct obj_stock_pcp *stock;
3471
3472 slab = virt_to_slab(p[i]);
3473
3474 if (!slab_obj_exts(slab) &&
3475 alloc_slab_obj_exts(slab, s, flags, false)) {
3476 continue;
3477 }
3478
3479 /*
3480 * if we fail and size is 1, memcg_alloc_abort_single() will
3481 * just free the object, which is ok as we have not assigned
3482 * objcg to its obj_ext yet
3483 *
3484 * for larger sizes, kmem_cache_free_bulk() will uncharge
3485 * any objects that were already charged and obj_ext assigned
3486 *
3487 * TODO: we could batch this until slab_pgdat(slab) changes
3488 * between iterations, with a more complicated undo
3489 */
3490 stock = trylock_stock();
3491 if (!stock || !__consume_obj_stock(objcg, stock, obj_size)) {
3492 size_t remainder;
3493
3494 unlock_stock(stock);
3495 if (__obj_cgroup_charge(objcg, flags, obj_size, &remainder))
3496 return false;
3497 stock = trylock_stock();
3498 if (remainder)
3499 __refill_obj_stock(objcg, stock, remainder, false);
3500 }
3501 __account_obj_stock(objcg, stock, obj_size,
3502 slab_pgdat(slab), cache_vmstat_idx(s));
3503 unlock_stock(stock);
3504
3505 obj_exts = slab_obj_exts(slab);
3506 get_slab_obj_exts(obj_exts);
3507 off = obj_to_index(s, slab, p[i]);
3508 obj_ext = slab_obj_ext(slab, obj_exts, off);
3509 obj_cgroup_get(objcg);
3510 obj_ext->objcg = objcg;
3511 put_slab_obj_exts(obj_exts);
3512 }
3513
3514 return true;
3515}
3516
3517void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3518 void **p, int objects, unsigned long obj_exts)
3519{
3520 size_t obj_size = obj_full_size(s);
3521
3522 for (int i = 0; i < objects; i++) {
3523 struct obj_cgroup *objcg;
3524 struct slabobj_ext *obj_ext;
3525 struct obj_stock_pcp *stock;
3526 unsigned int off;
3527
3528 off = obj_to_index(s, slab, p[i]);
3529 obj_ext = slab_obj_ext(slab, obj_exts, off);
3530 objcg = obj_ext->objcg;
3531 if (!objcg)
3532 continue;
3533
3534 obj_ext->objcg = NULL;
3535
3536 stock = trylock_stock();
3537 __refill_obj_stock(objcg, stock, obj_size, true);
3538 __account_obj_stock(objcg, stock, -obj_size,
3539 slab_pgdat(slab), cache_vmstat_idx(s));
3540 unlock_stock(stock);
3541
3542 obj_cgroup_put(objcg);
3543 }
3544}
3545
3546/*
3547 * The objcg is only set on the first page, so transfer it to all the
3548 * other pages.
3549 */
3550void split_page_memcg(struct page *page, unsigned order)
3551{
3552 struct obj_cgroup *objcg = page_objcg(page);
3553 unsigned int i, nr = 1 << order;
3554
3555 if (!objcg)
3556 return;
3557
3558 for (i = 1; i < nr; i++)
3559 page_set_objcg(&page[i], objcg);
3560
3561 obj_cgroup_get_many(objcg, nr - 1);
3562}
3563
3564void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
3565 unsigned new_order)
3566{
3567 unsigned new_refs;
3568
3569 if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3570 return;
3571
3572 new_refs = (1 << (old_order - new_order)) - 1;
3573 obj_cgroup_get_many(folio_objcg(folio), new_refs);
3574}
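
/*
 * For example, splitting an order-9 folio down to order-0 pages takes
 * (1 << (9 - 0)) - 1 = 511 extra objcg references, one for each new folio
 * beyond the original.
 */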
3575
3576static void memcg_online_kmem(struct mem_cgroup *memcg)
3577{
3578 if (mem_cgroup_kmem_disabled())
3579 return;
3580
3581 if (unlikely(mem_cgroup_is_root(memcg)))
3582 return;
3583
3584 static_branch_enable(&memcg_kmem_online_key);
3585
3586 memcg->kmemcg_id = memcg->id.id;
3587}
3588
3589static void memcg_offline_kmem(struct mem_cgroup *memcg)
3590{
3591 struct mem_cgroup *parent;
3592
3593 if (mem_cgroup_kmem_disabled())
3594 return;
3595
3596 if (unlikely(mem_cgroup_is_root(memcg)))
3597 return;
3598
3599 parent = parent_mem_cgroup(memcg);
3600 memcg_reparent_list_lrus(memcg, parent);
3601}
3602
3603#ifdef CONFIG_CGROUP_WRITEBACK
3604
3605#include <trace/events/writeback.h>
3606
3607static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3608{
3609 return wb_domain_init(&memcg->cgwb_domain, gfp);
3610}
3611
3612static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3613{
3614 wb_domain_exit(&memcg->cgwb_domain);
3615}
3616
3617static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3618{
3619 wb_domain_size_changed(&memcg->cgwb_domain);
3620}
3621
3622struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3623{
3624 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3625
3626 if (!memcg->css.parent)
3627 return NULL;
3628
3629 return &memcg->cgwb_domain;
3630}
3631
3632/**
3633 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3634 * @wb: bdi_writeback in question
3635 * @pfilepages: out parameter for number of file pages
3636 * @pheadroom: out parameter for number of allocatable pages according to memcg
3637 * @pdirty: out parameter for number of dirty pages
3638 * @pwriteback: out parameter for number of pages under writeback
3639 *
3640 * Determine the numbers of file, headroom, dirty, and writeback pages in
3641 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3642 * is a bit more involved.
3643 *
3644 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3645 * headroom is calculated as the lowest headroom of itself and the
3646 * ancestors. Note that this doesn't consider the actual amount of
3647 * available memory in the system. The caller should further cap
3648 * *@pheadroom accordingly.
3649 */
3650void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3651 unsigned long *pheadroom, unsigned long *pdirty,
3652 unsigned long *pwriteback)
3653{
3654 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3655 struct mem_cgroup *parent;
3656
3657 mem_cgroup_flush_stats_ratelimited(memcg);
3658
3659 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3660 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3661 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3662 memcg_page_state(memcg, NR_ACTIVE_FILE);
3663
3664 *pheadroom = PAGE_COUNTER_MAX;
3665 while ((parent = parent_mem_cgroup(memcg))) {
3666 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3667 READ_ONCE(memcg->memory.high));
3668 unsigned long used = page_counter_read(&memcg->memory);
3669
3670 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3671 memcg = parent;
3672 }
3673}
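
/*
 * Worked headroom example (illustrative numbers): a memcg with
 * memory.max = 1GiB, memory.high = 512MiB and 300MiB in use contributes
 * min(1GiB, 512MiB) - 300MiB = 212MiB; the loop above reports the smallest
 * such value along the ancestry in *@pheadroom.
 */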
3674
3675/*
3676 * Foreign dirty flushing
3677 *
3678 * There's an inherent mismatch between memcg and writeback. The former
3679 * tracks ownership per-page while the latter per-inode. This was a
3680 * deliberate design decision because honoring per-page ownership in the
3681 * writeback path is complicated, may lead to higher CPU and IO overheads
3682 * and deemed unnecessary given that write-sharing an inode across
3683 * different cgroups isn't a common use-case.
3684 *
3685 * Combined with inode majority-writer ownership switching, this works well
3686 * enough in most cases but there are some pathological cases. For
3687 * example, let's say there are two cgroups A and B which keep writing to
3688 * different but confined parts of the same inode. B owns the inode and
3689 * A's memory is limited far below B's. A's dirty ratio can rise enough to
3690 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3691 * triggering background writeback. A will be slowed down without a way to
3692 * make writeback of the dirty pages happen.
3693 *
3694 * Conditions like the above can lead to a cgroup getting repeatedly and
3695 * severely throttled after making some progress after each
3696 * dirty_expire_interval while the underlying IO device is almost
3697 * completely idle.
3698 *
3699 * Solving this problem completely requires matching the ownership tracking
3700 * granularities between memcg and writeback in either direction. However,
3701 * the more egregious behaviors can be avoided by simply remembering the
3702 * most recent foreign dirtying events and initiating remote flushes on
3703 * them when local writeback isn't enough to keep the memory clean enough.
3704 *
3705 * The following two functions implement such mechanism. When a foreign
3706 * page - a page whose memcg and writeback ownerships don't match - is
3707 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3708 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
3709 * decides that the memcg needs to sleep due to high dirty ratio, it calls
3710 * mem_cgroup_flush_foreign() which queues writeback on the recorded
3711 * foreign bdi_writebacks which haven't expired. Both the numbers of
3712 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3713 * limited to MEMCG_CGWB_FRN_CNT.
3714 *
3715 * The mechanism only remembers IDs and doesn't hold any object references.
3716 * As being wrong occasionally doesn't matter, updates and accesses to the
3717 * records are lockless and racy.
3718 */
3719void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3720 struct bdi_writeback *wb)
3721{
3722 struct mem_cgroup *memcg = folio_memcg(folio);
3723 struct memcg_cgwb_frn *frn;
3724 u64 now = get_jiffies_64();
3725 u64 oldest_at = now;
3726 int oldest = -1;
3727 int i;
3728
3729 trace_track_foreign_dirty(folio, wb);
3730
3731 /*
3732 * Pick the slot to use. If there is already a slot for @wb, keep
3733 * using it. If not replace the oldest one which isn't being
3734 * written out.
3735 */
3736 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3737 frn = &memcg->cgwb_frn[i];
3738 if (frn->bdi_id == wb->bdi->id &&
3739 frn->memcg_id == wb->memcg_css->id)
3740 break;
3741 if (time_before64(frn->at, oldest_at) &&
3742 atomic_read(&frn->done.cnt) == 1) {
3743 oldest = i;
3744 oldest_at = frn->at;
3745 }
3746 }
3747
3748 if (i < MEMCG_CGWB_FRN_CNT) {
3749 /*
3750 * Re-using an existing one. Update timestamp lazily to
3751 * avoid making the cacheline hot. We want them to be
3752 * reasonably up-to-date and significantly shorter than
3753 * dirty_expire_interval as that's what expires the record.
3754 * Use the shorter of 1s and dirty_expire_interval / 8.
3755 */
3756 unsigned long update_intv =
3757 min_t(unsigned long, HZ,
3758 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3759
3760 if (time_before64(frn->at, now - update_intv))
3761 frn->at = now;
3762 } else if (oldest >= 0) {
3763 /* replace the oldest free one */
3764 frn = &memcg->cgwb_frn[oldest];
3765 frn->bdi_id = wb->bdi->id;
3766 frn->memcg_id = wb->memcg_css->id;
3767 frn->at = now;
3768 }
3769}
3770
3771/* issue foreign writeback flushes for recorded foreign dirtying events */
3772void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3773{
3774 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3775 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3776 u64 now = jiffies_64;
3777 int i;
3778
3779 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3780 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3781
3782 /*
3783 * If the record is older than dirty_expire_interval,
3784 * writeback on it has already started. No need to kick it
3785 * off again. Also, don't start a new one if there's
3786 * already one in flight.
3787 */
3788 if (time_after64(frn->at, now - intv) &&
3789 atomic_read(&frn->done.cnt) == 1) {
3790 frn->at = 0;
3791 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3792 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3793 WB_REASON_FOREIGN_FLUSH,
3794 &frn->done);
3795 }
3796 }
3797}
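
/*
 * Illustrative pairing of the two hooks above (a sketch of the expected
 * caller pattern, not the actual fs-writeback code): the dirtying path
 * records foreign events and the throttling path flushes them.
 *
 *	if (folio_memcg(folio) != mem_cgroup_from_css(wb->memcg_css))
 *		mem_cgroup_track_foreign_dirty(folio, wb);
 *	...
 *	// in balance_dirty_pages(), once the memcg is over its dirty ratio:
 *	mem_cgroup_flush_foreign(wb);
 *
 * This keeps remote flushes limited to the MEMCG_CGWB_FRN_CNT most
 * recently recorded foreign writebacks of the throttled memcg.
 */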
3798
3799#else /* CONFIG_CGROUP_WRITEBACK */
3800
3801static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3802{
3803 return 0;
3804}
3805
3806static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3807{
3808}
3809
3810static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3811{
3812}
3813
3814#endif /* CONFIG_CGROUP_WRITEBACK */
3815
3816/*
3817 * Private memory cgroup IDR
3818 *
3819 * Swap-out records and page cache shadow entries need to store memcg
3820 * references in constrained space, so we maintain an ID space that is
3821 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
3822 * memory-controlled cgroups to 64k.
3823 *
3824 * However, there usually are many references to the offline CSS after
3825 * the cgroup has been destroyed, such as page cache or reclaimable
3826 * slab objects, that don't need to hang on to the ID. We want to keep
3827 * those dead CSS from occupying IDs, or we might quickly exhaust the
3828 * relatively small ID space and prevent the creation of new cgroups
3829 * even when there are much fewer than 64k cgroups - possibly none.
3830 *
3831 * Maintain a private 16-bit ID space for memcg, and allow the ID to
3832 * be freed and recycled when it's no longer needed, which is usually
3833 * when the CSS is offlined.
3834 *
3835 * The only exceptions to that are records of swapped out tmpfs/shmem
3836 * pages that need to be attributed to live ancestors on swapin. But
3837 * those references are manageable from userspace.
3838 */
3839
3840#define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3841static DEFINE_XARRAY_ALLOC1(mem_cgroup_private_ids);
3842
3843static void mem_cgroup_private_id_remove(struct mem_cgroup *memcg)
3844{
3845 if (memcg->id.id > 0) {
3846 xa_erase(&mem_cgroup_private_ids, memcg->id.id);
3847 memcg->id.id = 0;
3848 }
3849}
3850
3851static inline void mem_cgroup_private_id_put(struct mem_cgroup *memcg, unsigned int n)
3852{
3853 if (refcount_sub_and_test(n, &memcg->id.ref)) {
3854 mem_cgroup_private_id_remove(memcg);
3855
3856 /* Memcg ID pins CSS */
3857 css_put(&memcg->css);
3858 }
3859}
3860
3861struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg, unsigned int n)
3862{
3863 while (!refcount_add_not_zero(n, &memcg->id.ref)) {
3864 /*
3865 * The root cgroup cannot be destroyed, so its refcount must
3866 * always be >= 1.
3867 */
3868 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
3869 VM_BUG_ON(1);
3870 break;
3871 }
3872 memcg = parent_mem_cgroup(memcg);
3873 }
3874 return memcg;
3875}
3876
3877/**
3878 * mem_cgroup_from_private_id - look up a memcg from a memcg id
3879 * @id: the memcg id to look up
3880 *
3881 * Caller must hold rcu_read_lock().
3882 */
3883struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id)
3884{
3885 WARN_ON_ONCE(!rcu_read_lock_held());
3886 return xa_load(&mem_cgroup_private_ids, id);
3887}
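
/*
 * Typical lookup pattern (illustrative only): the private ID yields a
 * pointer that is only stable under RCU, so callers that need the memcg
 * after unlocking must take a CSS reference first.
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_private_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 *	if (memcg) {
 *		...
 *		css_put(&memcg->css);
 *	}
 */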
3888
3889struct mem_cgroup *mem_cgroup_get_from_id(u64 id)
3890{
3891 struct cgroup *cgrp;
3892 struct cgroup_subsys_state *css;
3893 struct mem_cgroup *memcg = NULL;
3894
3895 cgrp = cgroup_get_from_id(id);
3896 if (IS_ERR(cgrp))
3897 return NULL;
3898
3899 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3900 if (css)
3901 memcg = container_of(css, struct mem_cgroup, css);
3902
3903 cgroup_put(cgrp);
3904
3905 return memcg;
3906}
3907
3908static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn)
3909{
3910 if (!pn)
3911 return;
3912
3913 free_percpu(pn->lruvec_stats_percpu);
3914 kfree(pn->lruvec_stats);
3915 kfree(pn);
3916}
3917
3918static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3919{
3920 struct mem_cgroup_per_node *pn;
3921
3922 pn = kmem_cache_alloc_node(memcg_pn_cachep, GFP_KERNEL | __GFP_ZERO,
3923 node);
3924 if (!pn)
3925 return false;
3926
3927 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3928 GFP_KERNEL_ACCOUNT, node);
3929 if (!pn->lruvec_stats)
3930 goto fail;
3931
3932 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3933 GFP_KERNEL_ACCOUNT);
3934 if (!pn->lruvec_stats_percpu)
3935 goto fail;
3936
3937 INIT_LIST_HEAD(&pn->objcg_list);
3938
3939 lruvec_init(&pn->lruvec);
3940 pn->memcg = memcg;
3941
3942 memcg->nodeinfo[node] = pn;
3943 return true;
3944fail:
3945 free_mem_cgroup_per_node_info(pn);
3946 return false;
3947}
3948
3949static void __mem_cgroup_free(struct mem_cgroup *memcg)
3950{
3951 int node;
3952
3953 for_each_node(node) {
3954 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3955 if (!pn)
3956 continue;
3957
3958 obj_cgroup_put(pn->orig_objcg);
3959 free_mem_cgroup_per_node_info(pn);
3960 }
3961 memcg1_free_events(memcg);
3962 kfree(memcg->vmstats);
3963 free_percpu(memcg->vmstats_percpu);
3964 kfree(memcg);
3965}
3966
3967static void mem_cgroup_free(struct mem_cgroup *memcg)
3968{
3969 lru_gen_exit_memcg(memcg);
3970 memcg_wb_domain_exit(memcg);
3971 __mem_cgroup_free(memcg);
3972}
3973
3974static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3975{
3976 struct memcg_vmstats_percpu *statc;
3977 struct memcg_vmstats_percpu __percpu *pstatc_pcpu;
3978 struct mem_cgroup *memcg;
3979 int node, cpu;
3980 int __maybe_unused i;
3981 long error;
3982
3983 memcg = kmem_cache_zalloc(memcg_cachep, GFP_KERNEL);
3984 if (!memcg)
3985 return ERR_PTR(-ENOMEM);
3986
3987 error = xa_alloc(&mem_cgroup_private_ids, &memcg->id.id, NULL,
3988 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3989 if (error)
3990 goto fail;
3991 error = -ENOMEM;
3992
3993 memcg->vmstats = kzalloc_obj(struct memcg_vmstats, GFP_KERNEL_ACCOUNT);
3994 if (!memcg->vmstats)
3995 goto fail;
3996
3997 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3998 GFP_KERNEL_ACCOUNT);
3999 if (!memcg->vmstats_percpu)
4000 goto fail;
4001
4002 if (!memcg1_alloc_events(memcg))
4003 goto fail;
4004
4005 for_each_possible_cpu(cpu) {
4006 if (parent)
4007 pstatc_pcpu = parent->vmstats_percpu;
4008 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
4009 statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
4010 statc->vmstats = memcg->vmstats;
4011 }
4012
4013 for_each_node(node)
4014 if (!alloc_mem_cgroup_per_node_info(memcg, node))
4015 goto fail;
4016
4017 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4018 goto fail;
4019
4020 INIT_WORK(&memcg->high_work, high_work_func);
4021 vmpressure_init(&memcg->vmpressure);
4022 INIT_LIST_HEAD(&memcg->memory_peaks);
4023 INIT_LIST_HEAD(&memcg->swap_peaks);
4024 spin_lock_init(&memcg->peaks_lock);
4025 memcg->socket_pressure = get_jiffies_64();
4026#if BITS_PER_LONG < 64
4027 seqlock_init(&memcg->socket_pressure_seqlock);
4028#endif
4029 memcg1_memcg_init(memcg);
4030 memcg->kmemcg_id = -1;
4031#ifdef CONFIG_CGROUP_WRITEBACK
4032 INIT_LIST_HEAD(&memcg->cgwb_list);
4033 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
4034 memcg->cgwb_frn[i].done =
4035 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
4036#endif
4037#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4038 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
4039 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
4040 memcg->deferred_split_queue.split_queue_len = 0;
4041#endif
4042 lru_gen_init_memcg(memcg);
4043 return memcg;
4044fail:
4045 mem_cgroup_private_id_remove(memcg);
4046 __mem_cgroup_free(memcg);
4047 return ERR_PTR(error);
4048}
4049
4050static struct cgroup_subsys_state * __ref
4051mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4052{
4053 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4054 struct mem_cgroup *memcg, *old_memcg;
4055 bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys);
4056
4057 old_memcg = set_active_memcg(parent);
4058 memcg = mem_cgroup_alloc(parent);
4059 set_active_memcg(old_memcg);
4060 if (IS_ERR(memcg))
4061 return ERR_CAST(memcg);
4062
4063 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
4064 memcg1_soft_limit_reset(memcg);
4065#ifdef CONFIG_ZSWAP
4066 memcg->zswap_max = PAGE_COUNTER_MAX;
4067 WRITE_ONCE(memcg->zswap_writeback, true);
4068#endif
4069 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
4070 if (parent) {
4071 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
4072
4073 page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl);
4074 page_counter_init(&memcg->swap, &parent->swap, false);
4075#ifdef CONFIG_MEMCG_V1
4076 memcg->memory.track_failcnt = !memcg_on_dfl;
4077 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
4078 page_counter_init(&memcg->kmem, &parent->kmem, false);
4079 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
4080#endif
4081 } else {
4082 init_memcg_stats();
4083 init_memcg_events();
4084 page_counter_init(&memcg->memory, NULL, true);
4085 page_counter_init(&memcg->swap, NULL, false);
4086#ifdef CONFIG_MEMCG_V1
4087 page_counter_init(&memcg->kmem, NULL, false);
4088 page_counter_init(&memcg->tcpmem, NULL, false);
4089#endif
4090 root_mem_cgroup = memcg;
4091 return &memcg->css;
4092 }
4093
4094 if (memcg_on_dfl && !cgroup_memory_nosocket)
4095 static_branch_inc(&memcg_sockets_enabled_key);
4096
4097 if (!cgroup_memory_nobpf)
4098 static_branch_inc(&memcg_bpf_enabled_key);
4099
4100 return &memcg->css;
4101}
4102
4103static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
4104{
4105 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4106 struct obj_cgroup *objcg;
4107 int nid;
4108
4109 memcg_online_kmem(memcg);
4110
4111 /*
4112 * A memcg must be visible for expand_shrinker_info()
4113 * by the time the maps are allocated. So, we allocate maps
4114 * here, when for_each_mem_cgroup() can't skip it.
4115 */
4116 if (alloc_shrinker_info(memcg))
4117 goto offline_kmem;
4118
4119 for_each_node(nid) {
4120 objcg = obj_cgroup_alloc();
4121 if (!objcg)
4122 goto free_objcg;
4123
4124 if (unlikely(mem_cgroup_is_root(memcg)))
4125 objcg->is_root = true;
4126
4127 objcg->memcg = memcg;
4128 rcu_assign_pointer(memcg->nodeinfo[nid]->objcg, objcg);
4129 obj_cgroup_get(objcg);
4130 memcg->nodeinfo[nid]->orig_objcg = objcg;
4131 }
4132
4133 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
4134 queue_delayed_work(system_dfl_wq, &stats_flush_dwork,
4135 FLUSH_TIME);
4136 lru_gen_online_memcg(memcg);
4137
4138 /* Online state pins memcg ID, memcg ID pins CSS */
4139 refcount_set(&memcg->id.ref, 1);
4140 css_get(css);
4141
4142 /*
4143 * Ensure mem_cgroup_from_private_id() works once we're fully online.
4144 *
4145 * We could do this earlier and require callers to filter with
4146 * css_tryget_online(). But right now there are no users that
4147 * need earlier access, and the workingset code relies on the
4148 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
4149 * publish it here at the end of onlining. This matches the
4150 * regular ID destruction during offlining.
4151 */
4152 xa_store(&mem_cgroup_private_ids, memcg->id.id, memcg, GFP_KERNEL);
4153
4154 return 0;
4155free_objcg:
4156 for_each_node(nid) {
4157 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4158
4159 objcg = rcu_replace_pointer(pn->objcg, NULL, true);
4160 if (objcg)
4161 percpu_ref_kill(&objcg->refcnt);
4162
4163 if (pn->orig_objcg) {
4164 obj_cgroup_put(pn->orig_objcg);
4165 /*
4166 * Reset pn->orig_objcg to NULL to prevent
4167 * obj_cgroup_put() from being called again in
4168 * __mem_cgroup_free().
4169 */
4170 pn->orig_objcg = NULL;
4171 }
4172 }
4173 free_shrinker_info(memcg);
4174offline_kmem:
4175 memcg_offline_kmem(memcg);
4176 mem_cgroup_private_id_remove(memcg);
4177 return -ENOMEM;
4178}
4179
4180static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4181{
4182 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4183
4184 memcg1_css_offline(memcg);
4185
4186 page_counter_set_min(&memcg->memory, 0);
4187 page_counter_set_low(&memcg->memory, 0);
4188
4189 zswap_memcg_offline_cleanup(memcg);
4190
4191 memcg_offline_kmem(memcg);
4192 reparent_deferred_split_queue(memcg);
4193 /*
4194 * The reparenting of objcg must be after the reparenting of the
4195 * list_lru and deferred_split_queue above, which ensures that they will
4196 * not mistakenly get the parent list_lru and deferred_split_queue.
4197 */
4198 memcg_reparent_objcgs(memcg);
4199 reparent_shrinker_deferred(memcg);
4200 wb_memcg_offline(memcg);
4201 lru_gen_offline_memcg(memcg);
4202
4203 drain_all_stock(memcg);
4204
4205 mem_cgroup_private_id_put(memcg, 1);
4206}
4207
4208static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4209{
4210 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4211
4212 invalidate_reclaim_iterators(memcg);
4213 lru_gen_release_memcg(memcg);
4214}
4215
4216static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4217{
4218 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4219 int __maybe_unused i;
4220
4221#ifdef CONFIG_CGROUP_WRITEBACK
4222 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
4223 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
4224#endif
4225 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4226 static_branch_dec(&memcg_sockets_enabled_key);
4227
4228 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
4229 static_branch_dec(&memcg_sockets_enabled_key);
4230
4231 if (!cgroup_memory_nobpf)
4232 static_branch_dec(&memcg_bpf_enabled_key);
4233
4234 vmpressure_cleanup(&memcg->vmpressure);
4235 cancel_work_sync(&memcg->high_work);
4236 memcg1_remove_from_trees(memcg);
4237 free_shrinker_info(memcg);
4238 mem_cgroup_free(memcg);
4239}
4240
4241/**
4242 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4243 * @css: the target css
4244 *
4245 * Reset the states of the mem_cgroup associated with @css. This is
4246 * invoked when the userland requests disabling on the default hierarchy
4247 * but the memcg is pinned through dependency. The memcg should stop
4248 * applying policies and should revert to the vanilla state as it may be
4249 * made visible again.
4250 *
4251 * The current implementation only resets the essential configurations.
4252 * This needs to be expanded to cover all the visible parts.
4253 */
4254static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4255{
4256 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4257
4258 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
4259 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
4260#ifdef CONFIG_MEMCG_V1
4261 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
4262 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
4263#endif
4264 page_counter_set_min(&memcg->memory, 0);
4265 page_counter_set_low(&memcg->memory, 0);
4266 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
4267 memcg1_soft_limit_reset(memcg);
4268 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
4269 memcg_wb_domain_size_changed(memcg);
4270}
4271
4272struct aggregate_control {
4273 /* pointer to the aggregated (CPU and subtree aggregated) counters */
4274 long *aggregate;
4275	/* pointer to the non-hierarchical (CPU aggregated) counters */
4276 long *local;
4277 /* pointer to the pending child counters during tree propagation */
4278 long *pending;
4279 /* pointer to the parent's pending counters, could be NULL */
4280 long *ppending;
4281 /* pointer to the percpu counters to be aggregated */
4282 long *cstat;
4283	/* pointer to the percpu counters of the last aggregation */
4284 long *cstat_prev;
4285 /* size of the above counters */
4286 int size;
4287};
4288
4289static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
4290{
4291 int i;
4292 long delta, delta_cpu, v;
4293
4294 for (i = 0; i < ac->size; i++) {
4295 /*
4296 * Collect the aggregated propagation counts of groups
4297 * below us. We're in a per-cpu loop here and this is
4298 * a global counter, so the first cycle will get them.
4299 */
4300 delta = ac->pending[i];
4301 if (delta)
4302 ac->pending[i] = 0;
4303
4304 /* Add CPU changes on this level since the last flush */
4305 delta_cpu = 0;
4306 v = READ_ONCE(ac->cstat[i]);
4307 if (v != ac->cstat_prev[i]) {
4308 delta_cpu = v - ac->cstat_prev[i];
4309 delta += delta_cpu;
4310 ac->cstat_prev[i] = v;
4311 }
4312
4313 /* Aggregate counts on this level and propagate upwards */
4314 if (delta_cpu)
4315 ac->local[i] += delta_cpu;
4316
4317 if (delta) {
4318 ac->aggregate[i] += delta;
4319 if (ac->ppending)
4320 ac->ppending[i] += delta;
4321 }
4322 }
4323}
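
/*
 * Worked example (illustrative numbers): suppose item i had pending[i] = 3
 * left behind by a child flush, and this CPU's counter moved from 5 to 9
 * since the previous flush. One pass over item i then does:
 *
 *	delta_cpu = 9 - 5 = 4	-> local[i]     += 4
 *	delta     = 3 + 4 = 7	-> aggregate[i] += 7
 *				-> ppending[i]  += 7 (if a parent exists)
 *
 * local thus tracks only this level's own CPUs, while aggregate and the
 * parent's pending also carry the subtree contribution.
 */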
4324
4325#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
4326static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4327 int cpu)
4328{
4329 int nid;
4330
4331 if (atomic_read(&memcg->kmem_stat)) {
4332 int kmem = atomic_xchg(&memcg->kmem_stat, 0);
4333 int index = memcg_stats_index(MEMCG_KMEM);
4334
4335 memcg->vmstats->state[index] += kmem;
4336 if (parent)
4337 parent->vmstats->state_pending[index] += kmem;
4338 }
4339
4340 for_each_node_state(nid, N_MEMORY) {
4341 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4342 struct lruvec_stats *lstats = pn->lruvec_stats;
4343 struct lruvec_stats *plstats = NULL;
4344
4345 if (parent)
4346 plstats = parent->nodeinfo[nid]->lruvec_stats;
4347
4348 if (atomic_read(&pn->slab_reclaimable)) {
4349 int slab = atomic_xchg(&pn->slab_reclaimable, 0);
4350 int index = memcg_stats_index(NR_SLAB_RECLAIMABLE_B);
4351
4352 lstats->state[index] += slab;
4353 if (plstats)
4354 plstats->state_pending[index] += slab;
4355 }
4356 if (atomic_read(&pn->slab_unreclaimable)) {
4357 int slab = atomic_xchg(&pn->slab_unreclaimable, 0);
4358 int index = memcg_stats_index(NR_SLAB_UNRECLAIMABLE_B);
4359
4360 lstats->state[index] += slab;
4361 if (plstats)
4362 plstats->state_pending[index] += slab;
4363 }
4364 }
4365}
4366#else
4367static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4368 int cpu)
4369{}
4370#endif
4371
4372static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
4373{
4374 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4375 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4376 struct memcg_vmstats_percpu *statc;
4377 struct aggregate_control ac;
4378 int nid;
4379
4380 flush_nmi_stats(memcg, parent, cpu);
4381
4382 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
4383
4384 ac = (struct aggregate_control) {
4385 .aggregate = memcg->vmstats->state,
4386 .local = memcg->vmstats->state_local,
4387 .pending = memcg->vmstats->state_pending,
4388 .ppending = parent ? parent->vmstats->state_pending : NULL,
4389 .cstat = statc->state,
4390 .cstat_prev = statc->state_prev,
4391 .size = MEMCG_VMSTAT_SIZE,
4392 };
4393 mem_cgroup_stat_aggregate(&ac);
4394
4395 ac = (struct aggregate_control) {
4396 .aggregate = memcg->vmstats->events,
4397 .local = memcg->vmstats->events_local,
4398 .pending = memcg->vmstats->events_pending,
4399 .ppending = parent ? parent->vmstats->events_pending : NULL,
4400 .cstat = statc->events,
4401 .cstat_prev = statc->events_prev,
4402 .size = NR_MEMCG_EVENTS,
4403 };
4404 mem_cgroup_stat_aggregate(&ac);
4405
4406 for_each_node_state(nid, N_MEMORY) {
4407 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4408 struct lruvec_stats *lstats = pn->lruvec_stats;
4409 struct lruvec_stats *plstats = NULL;
4410 struct lruvec_stats_percpu *lstatc;
4411
4412 if (parent)
4413 plstats = parent->nodeinfo[nid]->lruvec_stats;
4414
4415 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
4416
4417 ac = (struct aggregate_control) {
4418 .aggregate = lstats->state,
4419 .local = lstats->state_local,
4420 .pending = lstats->state_pending,
4421 .ppending = plstats ? plstats->state_pending : NULL,
4422 .cstat = lstatc->state,
4423 .cstat_prev = lstatc->state_prev,
4424 .size = NR_MEMCG_NODE_STAT_ITEMS,
4425 };
4426 mem_cgroup_stat_aggregate(&ac);
4427
4428 }
4429 WRITE_ONCE(statc->stats_updates, 0);
4430 /* We are in a per-cpu loop here, only do the atomic write once */
4431 if (atomic_long_read(&memcg->vmstats->stats_updates))
4432 atomic_long_set(&memcg->vmstats->stats_updates, 0);
4433}
4434
4435static void mem_cgroup_fork(struct task_struct *task)
4436{
4437 /*
4438 * Set the update flag to cause task->objcg to be initialized lazily
4439 * on the first allocation. It can be done without any synchronization
4440 * because it's always performed on the current task, as is
4441 * current_objcg_update().
4442 */
4443 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
4444}
4445
4446static void mem_cgroup_exit(struct task_struct *task)
4447{
4448 struct obj_cgroup *objcg = task->objcg;
4449
4450 objcg = (struct obj_cgroup *)
4451 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
4452 obj_cgroup_put(objcg);
4453
4454 /*
4455 * Some kernel allocations can happen after this point,
4456 * but let's ignore them. It can be done without any synchronization
4457 * because it's always performed on the current task, as is
4458 * current_objcg_update().
4459 */
4460 task->objcg = NULL;
4461}
4462
4463#ifdef CONFIG_LRU_GEN
4464static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
4465{
4466 struct task_struct *task;
4467 struct cgroup_subsys_state *css;
4468
4469 /* find the first leader if there is any */
4470 cgroup_taskset_for_each_leader(task, css, tset)
4471 break;
4472
4473 if (!task)
4474 return;
4475
4476 task_lock(task);
4477 if (task->mm && READ_ONCE(task->mm->owner) == task)
4478 lru_gen_migrate_mm(task->mm);
4479 task_unlock(task);
4480}
4481#else
4482static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
4483#endif /* CONFIG_LRU_GEN */
4484
4485static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
4486{
4487 struct task_struct *task;
4488 struct cgroup_subsys_state *css;
4489
4490 cgroup_taskset_for_each(task, css, tset) {
4491 /* atomically set the update bit */
4492 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
4493 }
4494}
4495
4496static void mem_cgroup_attach(struct cgroup_taskset *tset)
4497{
4498 mem_cgroup_lru_gen_attach(tset);
4499 mem_cgroup_kmem_attach(tset);
4500}
4501
4502static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
4503{
4504 if (value == PAGE_COUNTER_MAX)
4505 seq_puts(m, "max\n");
4506 else
4507 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
4508
4509 return 0;
4510}
4511
4512static u64 memory_current_read(struct cgroup_subsys_state *css,
4513 struct cftype *cft)
4514{
4515 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4516
4517 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4518}
4519
4520#define OFP_PEAK_UNSET (((-1UL)))
4521
4522static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
4523{
4524 struct cgroup_of_peak *ofp = of_peak(sf->private);
4525 u64 fd_peak = READ_ONCE(ofp->value), peak;
4526
4527 /* User wants global or local peak? */
4528 if (fd_peak == OFP_PEAK_UNSET)
4529 peak = pc->watermark;
4530 else
4531 peak = max(fd_peak, READ_ONCE(pc->local_watermark));
4532
4533 seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
4534 return 0;
4535}
4536
4537static int memory_peak_show(struct seq_file *sf, void *v)
4538{
4539 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
4540
4541 return peak_show(sf, v, &memcg->memory);
4542}
4543
4544static int peak_open(struct kernfs_open_file *of)
4545{
4546 struct cgroup_of_peak *ofp = of_peak(of);
4547
4548 ofp->value = OFP_PEAK_UNSET;
4549 return 0;
4550}
4551
4552static void peak_release(struct kernfs_open_file *of)
4553{
4554 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4555 struct cgroup_of_peak *ofp = of_peak(of);
4556
4557 if (ofp->value == OFP_PEAK_UNSET) {
4558 /* fast path (no writes on this fd) */
4559 return;
4560 }
4561 spin_lock(&memcg->peaks_lock);
4562 list_del(&ofp->list);
4563 spin_unlock(&memcg->peaks_lock);
4564}
4565
4566static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
4567 loff_t off, struct page_counter *pc,
4568 struct list_head *watchers)
4569{
4570 unsigned long usage;
4571 struct cgroup_of_peak *peer_ctx;
4572 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4573 struct cgroup_of_peak *ofp = of_peak(of);
4574
4575 spin_lock(&memcg->peaks_lock);
4576
4577 usage = page_counter_read(pc);
4578 WRITE_ONCE(pc->local_watermark, usage);
4579
4580 list_for_each_entry(peer_ctx, watchers, list)
4581 if (usage > peer_ctx->value)
4582 WRITE_ONCE(peer_ctx->value, usage);
4583
4584 /* initial write, register watcher */
4585 if (ofp->value == OFP_PEAK_UNSET)
4586 list_add(&ofp->list, watchers);
4587
4588 WRITE_ONCE(ofp->value, usage);
4589 spin_unlock(&memcg->peaks_lock);
4590
4591 return nbytes;
4592}
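
/*
 * Expected userspace usage of the fd-local peak (sketch, error handling
 * omitted): any non-empty write resets the watermark seen through that
 * open file description, while readers that never write keep seeing the
 * lifetime watermark.
 *
 *	fd = open("memory.peak", O_RDWR);
 *	write(fd, "reset", 5);
 *	... run the workload of interest ...
 *	pread(fd, buf, sizeof(buf), 0);	// peak usage since the reset, in bytes
 */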
4593
4594static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
4595 size_t nbytes, loff_t off)
4596{
4597 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4598
4599 return peak_write(of, buf, nbytes, off, &memcg->memory,
4600 &memcg->memory_peaks);
4601}
4602
4603#undef OFP_PEAK_UNSET
4604
4605static int memory_min_show(struct seq_file *m, void *v)
4606{
4607 return seq_puts_memcg_tunable(m,
4608 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4609}
4610
4611static ssize_t memory_min_write(struct kernfs_open_file *of,
4612 char *buf, size_t nbytes, loff_t off)
4613{
4614 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4615 unsigned long min;
4616 int err;
4617
4618 buf = strstrip(buf);
4619 err = page_counter_memparse(buf, "max", &min);
4620 if (err)
4621 return err;
4622
4623 page_counter_set_min(&memcg->memory, min);
4624
4625 return nbytes;
4626}
4627
4628static int memory_low_show(struct seq_file *m, void *v)
4629{
4630 return seq_puts_memcg_tunable(m,
4631 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4632}
4633
4634static ssize_t memory_low_write(struct kernfs_open_file *of,
4635 char *buf, size_t nbytes, loff_t off)
4636{
4637 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4638 unsigned long low;
4639 int err;
4640
4641 buf = strstrip(buf);
4642 err = page_counter_memparse(buf, "max", &low);
4643 if (err)
4644 return err;
4645
4646 page_counter_set_low(&memcg->memory, low);
4647
4648 return nbytes;
4649}
4650
4651static int memory_high_show(struct seq_file *m, void *v)
4652{
4653 return seq_puts_memcg_tunable(m,
4654 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4655}
4656
4657static ssize_t memory_high_write(struct kernfs_open_file *of,
4658 char *buf, size_t nbytes, loff_t off)
4659{
4660 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4661 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4662 bool drained = false;
4663 unsigned long high;
4664 int err;
4665
4666 buf = strstrip(buf);
4667 err = page_counter_memparse(buf, "max", &high);
4668 if (err)
4669 return err;
4670
4671 page_counter_set_high(&memcg->memory, high);
4672
4673 if (of->file->f_flags & O_NONBLOCK)
4674 goto out;
4675
4676 for (;;) {
4677 unsigned long nr_pages = page_counter_read(&memcg->memory);
4678 unsigned long reclaimed;
4679
4680 if (nr_pages <= high)
4681 break;
4682
4683 if (signal_pending(current))
4684 break;
4685
4686 if (!drained) {
4687 drain_all_stock(memcg);
4688 drained = true;
4689 continue;
4690 }
4691
4692 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4693 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4694
4695 if (!reclaimed && !nr_retries--)
4696 break;
4697 }
4698out:
4699 memcg_wb_domain_size_changed(memcg);
4700 return nbytes;
4701}
4702
4703static int memory_max_show(struct seq_file *m, void *v)
4704{
4705 return seq_puts_memcg_tunable(m,
4706 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4707}
4708
4709static ssize_t memory_max_write(struct kernfs_open_file *of,
4710 char *buf, size_t nbytes, loff_t off)
4711{
4712 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4713 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4714 bool drained = false;
4715 unsigned long max;
4716 int err;
4717
4718 buf = strstrip(buf);
4719 err = page_counter_memparse(buf, "max", &max);
4720 if (err)
4721 return err;
4722
4723 xchg(&memcg->memory.max, max);
4724
4725 if (of->file->f_flags & O_NONBLOCK)
4726 goto out;
4727
4728 for (;;) {
4729 unsigned long nr_pages = page_counter_read(&memcg->memory);
4730
4731 if (nr_pages <= max)
4732 break;
4733
4734 if (signal_pending(current))
4735 break;
4736
4737 if (!drained) {
4738 drain_all_stock(memcg);
4739 drained = true;
4740 continue;
4741 }
4742
4743 if (nr_reclaims) {
4744 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4745 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4746 nr_reclaims--;
4747 continue;
4748 }
4749
4750 memcg_memory_event(memcg, MEMCG_OOM);
4751 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4752 break;
4753 cond_resched();
4754 }
4755out:
4756 memcg_wb_domain_size_changed(memcg);
4757 return nbytes;
4758}
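
/*
 * Enforcement note for the loop above: lowering memory.max is synchronous.
 * After the new limit is published, the writer drains per-cpu stock, retries
 * direct reclaim up to MAX_RECLAIM_RETRIES times, and then OOM-kills tasks
 * in the group until usage fits, unless it is interrupted by a signal or
 * the file was opened with O_NONBLOCK.
 */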
4759
4760/*
4761 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4762 * if any new events become available.
4763 */
4764static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4765{
4766 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4767 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4768 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4769 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4770 seq_printf(m, "oom_kill %lu\n",
4771 atomic_long_read(&events[MEMCG_OOM_KILL]));
4772 seq_printf(m, "oom_group_kill %lu\n",
4773 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4774 seq_printf(m, "sock_throttled %lu\n",
4775 atomic_long_read(&events[MEMCG_SOCK_THROTTLED]));
4776}
4777
4778static int memory_events_show(struct seq_file *m, void *v)
4779{
4780 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4781
4782 __memory_events_show(m, memcg->memory_events);
4783 return 0;
4784}
4785
4786static int memory_events_local_show(struct seq_file *m, void *v)
4787{
4788 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4789
4790 __memory_events_show(m, memcg->memory_events_local);
4791 return 0;
4792}
4793
4794int memory_stat_show(struct seq_file *m, void *v)
4795{
4796 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4797 char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL);
4798 struct seq_buf s;
4799
4800 if (!buf)
4801 return -ENOMEM;
4802 seq_buf_init(&s, buf, SEQ_BUF_SIZE);
4803 memory_stat_format(memcg, &s);
4804 seq_puts(m, buf);
4805 kfree(buf);
4806 return 0;
4807}
4808
4809#ifdef CONFIG_NUMA
4810static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4811 int item)
4812{
4813 return lruvec_page_state(lruvec, item) *
4814 memcg_page_state_output_unit(item);
4815}
4816
4817static int memory_numa_stat_show(struct seq_file *m, void *v)
4818{
4819 int i;
4820 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4821
4822 mem_cgroup_flush_stats(memcg);
4823
4824 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4825 int nid;
4826
4827 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4828 continue;
4829
4830 seq_printf(m, "%s", memory_stats[i].name);
4831 for_each_node_state(nid, N_MEMORY) {
4832 u64 size;
4833 struct lruvec *lruvec;
4834
4835 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4836 size = lruvec_page_state_output(lruvec,
4837 memory_stats[i].idx);
4838 seq_printf(m, " N%d=%llu", nid, size);
4839 }
4840 seq_putc(m, '\n');
4841 }
4842
4843 return 0;
4844}
4845#endif
4846
4847static int memory_oom_group_show(struct seq_file *m, void *v)
4848{
4849 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4850
4851 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4852
4853 return 0;
4854}
4855
4856static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4857 char *buf, size_t nbytes, loff_t off)
4858{
4859 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4860 int ret, oom_group;
4861
4862 buf = strstrip(buf);
4863 if (!buf)
4864 return -EINVAL;
4865
4866 ret = kstrtoint(buf, 0, &oom_group);
4867 if (ret)
4868 return ret;
4869
4870 if (oom_group != 0 && oom_group != 1)
4871 return -EINVAL;
4872
4873 WRITE_ONCE(memcg->oom_group, oom_group);
4874
4875 return nbytes;
4876}
4877
4878static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4879 size_t nbytes, loff_t off)
4880{
4881 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4882 int ret;
4883
4884 ret = user_proactive_reclaim(buf, memcg, NULL);
4885 if (ret)
4886 return ret;
4887
4888 return nbytes;
4889}
4890
4891static struct cftype memory_files[] = {
4892 {
4893 .name = "current",
4894 .flags = CFTYPE_NOT_ON_ROOT,
4895 .read_u64 = memory_current_read,
4896 },
4897 {
4898 .name = "peak",
4899 .flags = CFTYPE_NOT_ON_ROOT,
4900 .open = peak_open,
4901 .release = peak_release,
4902 .seq_show = memory_peak_show,
4903 .write = memory_peak_write,
4904 },
4905 {
4906 .name = "min",
4907 .flags = CFTYPE_NOT_ON_ROOT,
4908 .seq_show = memory_min_show,
4909 .write = memory_min_write,
4910 },
4911 {
4912 .name = "low",
4913 .flags = CFTYPE_NOT_ON_ROOT,
4914 .seq_show = memory_low_show,
4915 .write = memory_low_write,
4916 },
4917 {
4918 .name = "high",
4919 .flags = CFTYPE_NOT_ON_ROOT,
4920 .seq_show = memory_high_show,
4921 .write = memory_high_write,
4922 },
4923 {
4924 .name = "max",
4925 .flags = CFTYPE_NOT_ON_ROOT,
4926 .seq_show = memory_max_show,
4927 .write = memory_max_write,
4928 },
4929 {
4930 .name = "events",
4931 .flags = CFTYPE_NOT_ON_ROOT,
4932 .file_offset = offsetof(struct mem_cgroup, events_file),
4933 .seq_show = memory_events_show,
4934 },
4935 {
4936 .name = "events.local",
4937 .flags = CFTYPE_NOT_ON_ROOT,
4938 .file_offset = offsetof(struct mem_cgroup, events_local_file),
4939 .seq_show = memory_events_local_show,
4940 },
4941 {
4942 .name = "stat",
4943 .seq_show = memory_stat_show,
4944 },
4945#ifdef CONFIG_NUMA
4946 {
4947 .name = "numa_stat",
4948 .seq_show = memory_numa_stat_show,
4949 },
4950#endif
4951 {
4952 .name = "oom.group",
4953 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4954 .seq_show = memory_oom_group_show,
4955 .write = memory_oom_group_write,
4956 },
4957 {
4958 .name = "reclaim",
4959 .flags = CFTYPE_NS_DELEGATABLE,
4960 .write = memory_reclaim,
4961 },
4962 { } /* terminate */
4963};
4964
4965struct cgroup_subsys memory_cgrp_subsys = {
4966 .css_alloc = mem_cgroup_css_alloc,
4967 .css_online = mem_cgroup_css_online,
4968 .css_offline = mem_cgroup_css_offline,
4969 .css_released = mem_cgroup_css_released,
4970 .css_free = mem_cgroup_css_free,
4971 .css_reset = mem_cgroup_css_reset,
4972 .css_rstat_flush = mem_cgroup_css_rstat_flush,
4973 .attach = mem_cgroup_attach,
4974 .fork = mem_cgroup_fork,
4975 .exit = mem_cgroup_exit,
4976 .dfl_cftypes = memory_files,
4977#ifdef CONFIG_MEMCG_V1
4978 .legacy_cftypes = mem_cgroup_legacy_files,
4979#endif
4980 .early_init = 0,
4981};
4982
4983/**
4984 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4985 * @root: the top ancestor of the sub-tree being checked
4986 * @memcg: the memory cgroup to check
4987 *
4988 * WARNING: This function is not stateless! It can only be used as part
4989 * of a top-down tree iteration, not for isolated queries.
4990 */
4991void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4992 struct mem_cgroup *memcg)
4993{
4994 bool recursive_protection =
4995 cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4996
4997 if (mem_cgroup_disabled())
4998 return;
4999
5000 if (!root)
5001 root = root_mem_cgroup;
5002
5003 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
5004}
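
/*
 * Illustrative top-down usage (a sketch, not the actual reclaim code):
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		mem_cgroup_calculate_protection(root, memcg);
 *		if (mem_cgroup_below_min(root, memcg))
 *			continue;	// hard protected, skip
 *		... reclaim from memcg ...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 *
 * Because each call consumes the effective protection computed for the
 * parent by the previous level, querying a memcg in isolation would read
 * stale parent state.
 */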
5005
5006static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
5007 gfp_t gfp)
5008{
5009 int ret = 0;
5010 struct obj_cgroup *objcg;
5011
5012 objcg = get_obj_cgroup_from_memcg(memcg);
5013 /* Do not account at the root objcg level. */
5014 if (!obj_cgroup_is_root(objcg))
5015 ret = try_charge_memcg(memcg, gfp, folio_nr_pages(folio));
5016 if (ret) {
5017 obj_cgroup_put(objcg);
5018 return ret;
5019 }
5020 commit_charge(folio, objcg);
5021 memcg1_commit_charge(folio, memcg);
5022
5023 return ret;
5024}
5025
5026int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
5027{
5028 struct mem_cgroup *memcg;
5029 int ret;
5030
5031 memcg = get_mem_cgroup_from_mm(mm);
5032 ret = charge_memcg(folio, memcg, gfp);
5033 css_put(&memcg->css);
5034
5035 return ret;
5036}
5037
5038/**
5039 * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
5040 * @folio: folio being charged
5041 * @gfp: reclaim mode
5042 *
5043 * This function is called when allocating a huge page folio, after the page has
5044 * already been obtained and charged to the appropriate hugetlb cgroup
5045 * controller (if it is enabled).
5046 *
5047 * Returns -ENOMEM if the memcg is already full.
5048 * Returns 0 if either the charge was successful, or if we skip the charging.
5049 */
5050int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
5051{
5052 struct mem_cgroup *memcg = get_mem_cgroup_from_current();
5053 int ret = 0;
5054
5055 /*
5056	 * Even if memcg does not account for hugetlb, we still want to update
5057	 * system-level stats via lruvec_stat_mod_folio(). Return 0 and skip
5058	 * charging the memcg.
5059 */
5060 if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
5061 !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5062 goto out;
5063
5064 if (charge_memcg(folio, memcg, gfp))
5065 ret = -ENOMEM;
5066
5067out:
5068 mem_cgroup_put(memcg);
5069 return ret;
5070}
5071
5072/**
5073 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
5074 * @folio: folio to charge.
5075 * @mm: mm context of the victim
5076 * @gfp: reclaim mode
5077 * @entry: swap entry for which the folio is allocated
5078 *
5079 * This function charges a folio allocated for swapin. Please call this before
5080 * adding the folio to the swapcache.
5081 *
5082 * Returns 0 on success. Otherwise, an error code is returned.
5083 */
5084int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
5085 gfp_t gfp, swp_entry_t entry)
5086{
5087 struct mem_cgroup *memcg;
5088 unsigned short id;
5089 int ret;
5090
5091 if (mem_cgroup_disabled())
5092 return 0;
5093
5094 id = lookup_swap_cgroup_id(entry);
5095 rcu_read_lock();
5096 memcg = mem_cgroup_from_private_id(id);
5097 if (!memcg || !css_tryget_online(&memcg->css))
5098 memcg = get_mem_cgroup_from_mm(mm);
5099 rcu_read_unlock();
5100
5101 ret = charge_memcg(folio, memcg, gfp);
5102
5103 css_put(&memcg->css);
5104 return ret;
5105}
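
/*
 * Expected ordering on swapin (illustrative sketch): charge the new folio
 * before it becomes visible in the swap cache, so its memcg ownership is
 * settled before other tasks can find and map it.
 *
 *	folio = ...allocate...;
 *	if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, GFP_KERNEL, entry))
 *		goto fail;
 *	... add the folio to the swap cache and read it in ...
 */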
5106
5107struct uncharge_gather {
5108 struct obj_cgroup *objcg;
5109 unsigned long nr_memory;
5110 unsigned long pgpgout;
5111 unsigned long nr_kmem;
5112 int nid;
5113};
5114
5115static inline void uncharge_gather_clear(struct uncharge_gather *ug)
5116{
5117 memset(ug, 0, sizeof(*ug));
5118}
5119
5120static void uncharge_batch(const struct uncharge_gather *ug)
5121{
5122 struct mem_cgroup *memcg;
5123
5124 rcu_read_lock();
5125 memcg = obj_cgroup_memcg(ug->objcg);
5126 if (ug->nr_memory) {
5127 memcg_uncharge(memcg, ug->nr_memory);
5128 if (ug->nr_kmem) {
5129 mod_memcg_state(memcg, MEMCG_KMEM, -ug->nr_kmem);
5130 memcg1_account_kmem(memcg, -ug->nr_kmem);
5131 }
5132 memcg1_oom_recover(memcg);
5133 }
5134
5135 memcg1_uncharge_batch(memcg, ug->pgpgout, ug->nr_memory, ug->nid);
5136 rcu_read_unlock();
5137
5138 /* drop reference from uncharge_folio */
5139 obj_cgroup_put(ug->objcg);
5140}
5141
5142static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
5143{
5144 long nr_pages;
5145 struct obj_cgroup *objcg;
5146
5147 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5148
5149 /*
5150 * Nobody should be changing or seriously looking at
5151	 * folio objcg at this point; we have fully exclusive
5152 * access to the folio.
5153 */
5154 objcg = folio_objcg(folio);
5155 if (!objcg)
5156 return;
5157
5158 if (ug->objcg != objcg) {
5159 if (ug->objcg) {
5160 uncharge_batch(ug);
5161 uncharge_gather_clear(ug);
5162 }
5163 ug->objcg = objcg;
5164 ug->nid = folio_nid(folio);
5165
5166 /* pairs with obj_cgroup_put in uncharge_batch */
5167 obj_cgroup_get(objcg);
5168 }
5169
5170 nr_pages = folio_nr_pages(folio);
5171
5172 if (folio_memcg_kmem(folio)) {
5173 ug->nr_memory += nr_pages;
5174 ug->nr_kmem += nr_pages;
5175 } else {
5176 /* LRU pages aren't accounted at the root level */
5177 if (!obj_cgroup_is_root(objcg))
5178 ug->nr_memory += nr_pages;
5179 ug->pgpgout++;
5180
5181 WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
5182 }
5183
5184 folio->memcg_data = 0;
5185 obj_cgroup_put(objcg);
5186}
5187
5188void __mem_cgroup_uncharge(struct folio *folio)
5189{
5190 struct uncharge_gather ug;
5191
5192 /* Don't touch folio->lru of any random page, pre-check: */
5193 if (!folio_memcg_charged(folio))
5194 return;
5195
5196 uncharge_gather_clear(&ug);
5197 uncharge_folio(folio, &ug);
5198 uncharge_batch(&ug);
5199}
5200
5201void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
5202{
5203 struct uncharge_gather ug;
5204 unsigned int i;
5205
5206 uncharge_gather_clear(&ug);
5207 for (i = 0; i < folios->nr; i++)
5208 uncharge_folio(folios->folios[i], &ug);
5209 if (ug.objcg)
5210 uncharge_batch(&ug);
5211}
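
/*
 * Batching note: uncharge_folio() only accumulates counts while consecutive
 * folios share an objcg; uncharge_batch() fires each time the objcg changes
 * and once at the end, so releasing N same-cgroup folios costs O(1) counter
 * updates instead of O(N).
 */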
5212
5213/**
5214 * mem_cgroup_replace_folio - Charge a folio's replacement.
5215 * @old: Currently circulating folio.
5216 * @new: Replacement folio.
5217 *
5218 * Charge @new as a replacement folio for @old. @old will
5219 * be uncharged upon free.
5220 *
5221 * Both folios must be locked, @new->mapping must be set up.
5222 */
5223void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
5224{
5225 struct mem_cgroup *memcg;
5226 struct obj_cgroup *objcg;
5227 long nr_pages = folio_nr_pages(new);
5228
5229 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
5230 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
5231 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
5232 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
5233
5234 if (mem_cgroup_disabled())
5235 return;
5236
5237 /* Page cache replacement: new folio already charged? */
5238 if (folio_memcg_charged(new))
5239 return;
5240
5241 objcg = folio_objcg(old);
5242 VM_WARN_ON_ONCE_FOLIO(!objcg, old);
5243 if (!objcg)
5244 return;
5245
5246 rcu_read_lock();
5247 memcg = obj_cgroup_memcg(objcg);
5248 /* Force-charge the new page. The old one will be freed soon */
5249 if (!obj_cgroup_is_root(objcg)) {
5250 page_counter_charge(&memcg->memory, nr_pages);
5251 if (do_memsw_account())
5252 page_counter_charge(&memcg->memsw, nr_pages);
5253 }
5254
5255 obj_cgroup_get(objcg);
5256 commit_charge(new, objcg);
5257 memcg1_commit_charge(new, memcg);
5258 rcu_read_unlock();
5259}
5260
5261/**
5262 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
5263 * @old: Currently circulating folio.
5264 * @new: Replacement folio.
5265 *
5266 * Transfer the memcg data from the old folio to the new folio for migration.
5267 * The old folio's memcg data will be cleared. Note that the memory counters
5268 * will remain unchanged throughout the process.
5269 *
5270 * Both folios must be locked, @new->mapping must be set up.
5271 */
5272void mem_cgroup_migrate(struct folio *old, struct folio *new)
5273{
5274 struct obj_cgroup *objcg;
5275
5276 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
5277 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
5278 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
5279 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
5280 VM_BUG_ON_FOLIO(folio_test_lru(old), old);
5281
5282 if (mem_cgroup_disabled())
5283 return;
5284
5285 objcg = folio_objcg(old);
5286 /*
5287 * Note that it is normal to see !objcg for a hugetlb folio.
5288	 * For example, it could have been allocated when memory_hugetlb_accounting
5289 * was not selected.
5290 */
5291 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !objcg, old);
5292 if (!objcg)
5293 return;
5294
5295 /* Transfer the charge and the objcg ref */
5296 commit_charge(new, objcg);
5297
5298 /* Warning should never happen, so don't worry about refcount non-0 */
5299 WARN_ON_ONCE(folio_unqueue_deferred_split(old));
5300 old->memcg_data = 0;
5301}
5302
5303DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5304EXPORT_SYMBOL(memcg_sockets_enabled_key);
5305
5306void mem_cgroup_sk_alloc(struct sock *sk)
5307{
5308 struct mem_cgroup *memcg;
5309
5310 if (!mem_cgroup_sockets_enabled)
5311 return;
5312
5313	/* Do not associate the sock with an unrelated interrupted task's memcg. */
5314 if (!in_task())
5315 return;
5316
5317 rcu_read_lock();
5318 memcg = mem_cgroup_from_task(current);
5319 if (mem_cgroup_is_root(memcg))
5320 goto out;
5321 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
5322 goto out;
5323 if (css_tryget(&memcg->css))
5324 sk->sk_memcg = memcg;
5325out:
5326 rcu_read_unlock();
5327}
5328
5329void mem_cgroup_sk_free(struct sock *sk)
5330{
5331 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5332
5333 if (memcg)
5334 css_put(&memcg->css);
5335}
5336
5337void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
5338{
5339 struct mem_cgroup *memcg;
5340
5341 if (sk->sk_memcg == newsk->sk_memcg)
5342 return;
5343
5344 mem_cgroup_sk_free(newsk);
5345
5346 memcg = mem_cgroup_from_sk(sk);
5347 if (memcg)
5348 css_get(&memcg->css);
5349
5350 newsk->sk_memcg = sk->sk_memcg;
5351}
5352
5353/**
5354 * mem_cgroup_sk_charge - charge socket memory
5355 * @sk: socket in memcg to charge
5356 * @nr_pages: number of pages to charge
5357 * @gfp_mask: reclaim mode
5358 *
5359 * Charges @nr_pages to @sk's memcg. Returns %true if the charge fits within
5360 * the memcg's configured limit, %false if it doesn't.
5361 */
5362bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
5363 gfp_t gfp_mask)
5364{
5365 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5366
5367 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5368 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
5369
5370 if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) {
5371 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
5372 return true;
5373 }
5374
5375 return false;
5376}
5377
5378/**
5379 * mem_cgroup_sk_uncharge - uncharge socket memory
5380 * @sk: socket in memcg to uncharge
5381 * @nr_pages: number of pages to uncharge
5382 */
5383void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages)
5384{
5385 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5386
5387 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5388 memcg1_uncharge_skmem(memcg, nr_pages);
5389 return;
5390 }
5391
5392 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
5393
5394 refill_stock(memcg, nr_pages);
5395}
5396
5397void mem_cgroup_flush_workqueue(void)
5398{
5399 flush_workqueue(memcg_wq);
5400}
5401
5402static int __init cgroup_memory(char *s)
5403{
5404 char *token;
5405
5406 while ((token = strsep(&s, ",")) != NULL) {
5407 if (!*token)
5408 continue;
5409 if (!strcmp(token, "nosocket"))
5410 cgroup_memory_nosocket = true;
5411 if (!strcmp(token, "nokmem"))
5412 cgroup_memory_nokmem = true;
5413 if (!strcmp(token, "nobpf"))
5414 cgroup_memory_nobpf = true;
5415 }
5416 return 1;
5417}
5418__setup("cgroup.memory=", cgroup_memory);
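
/*
 * For example, booting with cgroup.memory=nosocket,nokmem disables both
 * socket and kernel memory accounting; the tokens mirror the strsep()
 * parsing above.
 */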
5419
5420/*
5421 * Memory controller init before cgroup_init() initializes root_mem_cgroup.
5422 *
5423 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5424 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5425 * basically everything that doesn't depend on a specific mem_cgroup structure
5426 * should be initialized from here.
5427 */
5428int __init mem_cgroup_init(void)
5429{
5430 unsigned int memcg_size;
5431 int cpu;
5432
5433 /*
5434	 * Currently an s32 type (see struct batched_lruvec_stat) is used for
5435	 * per-memcg-per-cpu caching of per-node statistics. For this to work
5436	 * correctly, make sure that the overfill threshold can't
5437	 * exceed S32_MAX / PAGE_SIZE.
5438 */
5439 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
5440
5441 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5442 memcg_hotplug_cpu_dead);
5443
5444 memcg_wq = alloc_workqueue("memcg", WQ_PERCPU, 0);
5445 WARN_ON(!memcg_wq);
5446
5447 for_each_possible_cpu(cpu) {
5448 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5449 drain_local_memcg_stock);
5450 INIT_WORK(&per_cpu_ptr(&obj_stock, cpu)->work,
5451 drain_local_obj_stock);
5452 }
5453
5454 memcg_size = struct_size_t(struct mem_cgroup, nodeinfo, nr_node_ids);
5455 memcg_cachep = kmem_cache_create("mem_cgroup", memcg_size, 0,
5456 SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
5457
5458 memcg_pn_cachep = KMEM_CACHE(mem_cgroup_per_node,
5459 SLAB_PANIC | SLAB_HWCACHE_ALIGN);
5460
5461 return 0;
5462}
5463
5464#ifdef CONFIG_SWAP
5465/**
5466 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5467 * @folio: folio being added to swap
5468 * @entry: swap entry to charge
5469 *
5470 * Try to charge @folio's memcg for the swap space at @entry.
5471 *
5472 * Returns 0 on success, -ENOMEM on failure.
5473 */
5474int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5475{
5476 unsigned int nr_pages = folio_nr_pages(folio);
5477 struct page_counter *counter;
5478 struct mem_cgroup *memcg;
5479 struct obj_cgroup *objcg;
5480
5481 if (do_memsw_account())
5482 return 0;
5483
5484 objcg = folio_objcg(folio);
5485 VM_WARN_ON_ONCE_FOLIO(!objcg, folio);
5486 if (!objcg)
5487 return 0;
5488
5489 rcu_read_lock();
5490 memcg = obj_cgroup_memcg(objcg);
5491 if (!entry.val) {
5492 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5493 rcu_read_unlock();
5494 return 0;
5495 }
5496
5497 memcg = mem_cgroup_private_id_get_online(memcg, nr_pages);
5498	/* memcg is pinned by the memcg ID. */
5499 rcu_read_unlock();
5500
5501 if (!mem_cgroup_is_root(memcg) &&
5502 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5503 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5504 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5505 mem_cgroup_private_id_put(memcg, nr_pages);
5506 return -ENOMEM;
5507 }
5508 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5509
5510 swap_cgroup_record(folio, mem_cgroup_private_id(memcg), entry);
5511
5512 return 0;
5513}
5514
5515/**
5516 * __mem_cgroup_uncharge_swap - uncharge swap space
5517 * @entry: swap entry to uncharge
5518 * @nr_pages: the amount of swap space to uncharge
5519 */
5520void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5521{
5522 struct mem_cgroup *memcg;
5523 unsigned short id;
5524
5525 id = swap_cgroup_clear(entry, nr_pages);
5526 rcu_read_lock();
5527 memcg = mem_cgroup_from_private_id(id);
5528 if (memcg) {
5529 if (!mem_cgroup_is_root(memcg)) {
5530 if (do_memsw_account())
5531 page_counter_uncharge(&memcg->memsw, nr_pages);
5532 else
5533 page_counter_uncharge(&memcg->swap, nr_pages);
5534 }
5535 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5536 mem_cgroup_private_id_put(memcg, nr_pages);
5537 }
5538 rcu_read_unlock();
5539}
5540
5541long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5542{
5543 long nr_swap_pages = get_nr_swap_pages();
5544
5545 if (mem_cgroup_disabled() || do_memsw_account())
5546 return nr_swap_pages;
5547 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5548 nr_swap_pages = min_t(long, nr_swap_pages,
5549 READ_ONCE(memcg->swap.max) -
5550 page_counter_read(&memcg->swap));
5551 return nr_swap_pages;
5552}
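
/*
 * Worked example (illustrative numbers): with 8G of free swap globally, a
 * memcg whose swap.max is 1G with 768M already used, under a parent with
 * swap.max 512M and nothing used, gets min(8G, 1G - 768M, 512M - 0) = 256M.
 */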
5553
5554bool mem_cgroup_swap_full(struct folio *folio)
5555{
5556 struct mem_cgroup *memcg;
5557 bool ret = false;
5558
5559 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5560
5561 if (vm_swap_full())
5562 return true;
5563 if (do_memsw_account() || !folio_memcg_charged(folio))
5564 return ret;
5565
5566 rcu_read_lock();
5567 memcg = folio_memcg(folio);
5568 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5569 unsigned long usage = page_counter_read(&memcg->swap);
5570
5571 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5572 usage * 2 >= READ_ONCE(memcg->swap.max)) {
5573 ret = true;
5574 break;
5575 }
5576 }
5577 rcu_read_unlock();
5578
5579 return ret;
5580}
5581
5582static int __init setup_swap_account(char *s)
5583{
5584 bool res;
5585
5586 if (!kstrtobool(s, &res) && !res)
5587 pr_warn_once("The swapaccount=0 commandline option is deprecated "
5588 "in favor of configuring swap control via cgroupfs. "
5589 "Please report your usecase to linux-mm@kvack.org if you "
5590 "depend on this functionality.\n");
5591 return 1;
5592}
5593__setup("swapaccount=", setup_swap_account);
5594
5595static u64 swap_current_read(struct cgroup_subsys_state *css,
5596 struct cftype *cft)
5597{
5598 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5599
5600 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5601}
5602
5603static int swap_peak_show(struct seq_file *sf, void *v)
5604{
5605 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5606
5607 return peak_show(sf, v, &memcg->swap);
5608}
5609
5610static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
5611 size_t nbytes, loff_t off)
5612{
5613 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5614
5615 return peak_write(of, buf, nbytes, off, &memcg->swap,
5616 &memcg->swap_peaks);
5617}
5618
5619static int swap_high_show(struct seq_file *m, void *v)
5620{
5621 return seq_puts_memcg_tunable(m,
5622 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5623}
5624
5625static ssize_t swap_high_write(struct kernfs_open_file *of,
5626 char *buf, size_t nbytes, loff_t off)
5627{
5628 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5629 unsigned long high;
5630 int err;
5631
5632 buf = strstrip(buf);
5633 err = page_counter_memparse(buf, "max", &high);
5634 if (err)
5635 return err;
5636
5637 page_counter_set_high(&memcg->swap, high);
5638
5639 return nbytes;
5640}
5641
5642static int swap_max_show(struct seq_file *m, void *v)
5643{
5644 return seq_puts_memcg_tunable(m,
5645 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5646}
5647
5648static ssize_t swap_max_write(struct kernfs_open_file *of,
5649 char *buf, size_t nbytes, loff_t off)
5650{
5651 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5652 unsigned long max;
5653 int err;
5654
5655 buf = strstrip(buf);
5656 err = page_counter_memparse(buf, "max", &max);
5657 if (err)
5658 return err;
5659
5660 xchg(&memcg->swap.max, max);
5661
5662 return nbytes;
5663}
5664
5665static int swap_events_show(struct seq_file *m, void *v)
5666{
5667 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5668
5669 seq_printf(m, "high %lu\n",
5670 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5671 seq_printf(m, "max %lu\n",
5672 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5673 seq_printf(m, "fail %lu\n",
5674 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5675
5676 return 0;
5677}
5678
5679static struct cftype swap_files[] = {
5680 {
5681 .name = "swap.current",
5682 .flags = CFTYPE_NOT_ON_ROOT,
5683 .read_u64 = swap_current_read,
5684 },
5685 {
5686 .name = "swap.high",
5687 .flags = CFTYPE_NOT_ON_ROOT,
5688 .seq_show = swap_high_show,
5689 .write = swap_high_write,
5690 },
5691 {
5692 .name = "swap.max",
5693 .flags = CFTYPE_NOT_ON_ROOT,
5694 .seq_show = swap_max_show,
5695 .write = swap_max_write,
5696 },
5697 {
5698 .name = "swap.peak",
5699 .flags = CFTYPE_NOT_ON_ROOT,
5700 .open = peak_open,
5701 .release = peak_release,
5702 .seq_show = swap_peak_show,
5703 .write = swap_peak_write,
5704 },
5705 {
5706 .name = "swap.events",
5707 .flags = CFTYPE_NOT_ON_ROOT,
5708 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
5709 .seq_show = swap_events_show,
5710 },
5711 { } /* terminate */
5712};
5713
#ifdef CONFIG_ZSWAP
/**
 * obj_cgroup_may_zswap - check if this cgroup can zswap
 * @objcg: the object cgroup
 *
 * Check if the hierarchical zswap limit has been reached.
 *
 * This doesn't check for specific headroom, and it is not atomic
 * either. But with zswap, the size of the allocation is only known
 * once compression has occurred, and this optimistic pre-check avoids
 * spending cycles on compression when there is already no room left
 * or zswap is disabled altogether somewhere in the hierarchy.
 */
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg, *original_memcg;
	bool ret = true;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;

	original_memcg = get_mem_cgroup_from_objcg(objcg);
	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
	     memcg = parent_mem_cgroup(memcg)) {
		unsigned long max = READ_ONCE(memcg->zswap_max);
		unsigned long pages;

		if (max == PAGE_COUNTER_MAX)
			continue;
		if (max == 0) {
			ret = false;
			break;
		}

		/* Force flush to get accurate stats for charging */
		__mem_cgroup_flush_stats(memcg, true);
		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
		if (pages < max)
			continue;
		ret = false;
		break;
	}
	mem_cgroup_put(original_memcg);
	return ret;
}

/**
 * obj_cgroup_charge_zswap - charge compression backend memory
 * @objcg: the object cgroup
 * @size: size of compressed object
 *
 * This forces the charge after obj_cgroup_may_zswap() allowed
 * compression and storage in zswap for this cgroup to go ahead.
 */
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	if (obj_cgroup_is_root(objcg))
		return;

	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));

	/* PF_MEMALLOC context, charging must succeed */
	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
		VM_WARN_ON_ONCE(1);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
	/* an object that still occupies a full page did not compress */
	if (size == PAGE_SIZE)
		mod_memcg_state(memcg, MEMCG_ZSWAP_INCOMP, 1);
	rcu_read_unlock();
}

/**
 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
 * @objcg: the object cgroup
 * @size: size of compressed object
 *
 * Uncharges zswap memory on page in.
 */
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	if (obj_cgroup_is_root(objcg))
		return;

	obj_cgroup_uncharge(objcg, size);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
	if (size == PAGE_SIZE)
		mod_memcg_state(memcg, MEMCG_ZSWAP_INCOMP, -1);
	rcu_read_unlock();
}

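/*
 * Return true if pages in @memcg may be written back from zswap to the
 * backing swap device: either zswap is disabled entirely, or zswap.writeback
 * is enabled in @memcg and in every one of its ancestors.
 */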
bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
{
	/* if zswap is disabled, do not block pages going to the swapping device */
	if (!zswap_is_enabled())
		return true;

	for (; memcg; memcg = parent_mem_cgroup(memcg))
		if (!READ_ONCE(memcg->zswap_writeback))
			return false;

	return true;
}

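/*
 * memory.zswap.current read handler: flush stats and report the bytes
 * consumed by the zswap compression backend on behalf of this cgroup.
 */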
static u64 zswap_current_read(struct cgroup_subsys_state *css,
			      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	mem_cgroup_flush_stats(memcg);
	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
}

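/* memory.zswap.max read handler: print the zswap backing-memory limit */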
static int zswap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
}

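/*
 * memory.zswap.max write handler: parse the limit ("max" for unlimited) and
 * publish it for obj_cgroup_may_zswap() to check against.
 */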
static ssize_t zswap_max_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->zswap_max, max);

	return nbytes;
}

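/* memory.zswap.writeback read handler: report this cgroup's setting (0 or 1) */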
static int zswap_writeback_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
	return 0;
}

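/*
 * memory.zswap.writeback write handler: accept only 0 (block writeback to
 * the swap device) or 1 (allow it).
 */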
static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	int zswap_writeback;
	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);

	if (parse_ret)
		return parse_ret;

	if (zswap_writeback != 0 && zswap_writeback != 1)
		return -EINVAL;

	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
	return nbytes;
}

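/*
 * zswap control files for the default hierarchy; zswap.writeback is the
 * only one also exposed on the root cgroup.
 */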
static struct cftype zswap_files[] = {
	{
		.name = "zswap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = zswap_current_read,
	},
	{
		.name = "zswap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = zswap_max_show,
		.write = zswap_max_write,
	},
	{
		.name = "zswap.writeback",
		.seq_show = zswap_writeback_show,
		.write = zswap_writeback_write,
	},
	{ }	/* terminate */
};
#endif /* CONFIG_ZSWAP */

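/*
 * Register the swap control files with the memory controller, plus the
 * legacy memsw files and the zswap files when those options are configured.
 */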
static int __init mem_cgroup_swap_init(void)
{
	if (mem_cgroup_disabled())
		return 0;

	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
#ifdef CONFIG_MEMCG_V1
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
#endif
#ifdef CONFIG_ZSWAP
	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
#endif
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_SWAP */

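/**
 * mem_cgroup_node_filter_allowed - restrict a nodemask to a memcg's cpuset
 * @memcg: the memory cgroup, or NULL to leave @mask untouched
 * @mask: nodemask to be filtered in place
 *
 * Intersect @mask with the nodes allowed by the cpuset associated with
 * @memcg's cgroup.
 */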
void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg, nodemask_t *mask)
{
	nodemask_t allowed;

	if (!memcg)
		return;

	/*
	 * Since this interface is intended for use by migration paths, and
	 * reclaim and migration are subject to race conditions such as changes
	 * in effective_mems and hot-unplugging of nodes, an inaccurate allowed
	 * mask is acceptable.
	 */
	cpuset_nodes_allowed(memcg->css.cgroup, &allowed);
	nodes_and(*mask, *mask, allowed);
}

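/**
 * mem_cgroup_show_protected_memory - log the protected memory of a cgroup's children
 * @memcg: the memory cgroup, or NULL for the root cgroup
 *
 * Print the aggregate memory.min and memory.low protection currently claimed
 * by the children of @memcg. Only meaningful on the default hierarchy.
 */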
void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled() || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	if (!memcg)
		memcg = root_mem_cgroup;

	pr_warn("Memory cgroup min protection %lukB -- low protection %lukB\n",
		K(atomic_long_read(&memcg->memory.children_min_usage)),
		K(atomic_long_read(&memcg->memory.children_low_usage)));
}