1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/* memcontrol.h - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 */
10
11#ifndef _LINUX_MEMCONTROL_H
12#define _LINUX_MEMCONTROL_H
13#include <linux/cgroup.h>
14#include <linux/vm_event_item.h>
15#include <linux/hardirq.h>
16#include <linux/jump_label.h>
17#include <linux/kernel.h>
18#include <linux/page_counter.h>
19#include <linux/vmpressure.h>
20#include <linux/eventfd.h>
21#include <linux/mm.h>
22#include <linux/vmstat.h>
23#include <linux/writeback.h>
24#include <linux/page-flags.h>
25#include <linux/shrinker.h>
26
27struct mem_cgroup;
28struct obj_cgroup;
29struct page;
30struct mm_struct;
31struct kmem_cache;
32
33/* Cgroup-specific page state, on top of universal node page state */
34enum memcg_stat_item {
35 MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
36 MEMCG_SOCK,
37 MEMCG_PERCPU_B,
38 MEMCG_KMEM,
39 MEMCG_ZSWAP_B,
40 MEMCG_ZSWAPPED,
41 MEMCG_ZSWAP_INCOMP,
42 MEMCG_NR_STAT,
43};
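
/*
 * Illustrative sketch (not part of this header): memcg_stat_item values
 * start right after NR_VM_NODE_STAT_ITEMS, so memcg_page_state() (declared
 * below) accepts both enum node_stat_item and enum memcg_stat_item indices.
 * Individual items use different units (pages vs. bytes); the helper below
 * is hypothetical and only prints the raw counters.
 *
 *	static void report_memcg_counters(struct mem_cgroup *memcg)
 *	{
 *		unsigned long anon = memcg_page_state(memcg, NR_ANON_MAPPED);
 *		unsigned long sock = memcg_page_state(memcg, MEMCG_SOCK);
 *
 *		pr_info("anon=%lu sock=%lu\n", anon, sock);
 *	}
 */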
44
45enum memcg_memory_event {
46 MEMCG_LOW,
47 MEMCG_HIGH,
48 MEMCG_MAX,
49 MEMCG_OOM,
50 MEMCG_OOM_KILL,
51 MEMCG_OOM_GROUP_KILL,
52 MEMCG_SWAP_HIGH,
53 MEMCG_SWAP_MAX,
54 MEMCG_SWAP_FAIL,
55 MEMCG_SOCK_THROTTLED,
56 MEMCG_NR_MEMORY_EVENTS,
57};
58
59struct mem_cgroup_reclaim_cookie {
60 pg_data_t *pgdat;
61 int generation;
62};
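
/*
 * Illustrative sketch (not part of this header): reclaim passes a cookie to
 * mem_cgroup_iter() so that concurrent reclaimers on the same node share
 * iteration state and fairness. shrink_one_memcg() is a hypothetical
 * per-memcg reclaim step.
 *
 *	static void walk_hierarchy(struct mem_cgroup *root, pg_data_t *pgdat)
 *	{
 *		struct mem_cgroup_reclaim_cookie reclaim = { .pgdat = pgdat };
 *		struct mem_cgroup *memcg;
 *
 *		memcg = mem_cgroup_iter(root, NULL, &reclaim);
 *		do {
 *			shrink_one_memcg(memcg, pgdat);
 *		} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
 *	}
 */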
63
64#ifdef CONFIG_MEMCG
65
66#define MEM_CGROUP_ID_SHIFT 16
67
68struct mem_cgroup_private_id {
69 int id;
70 refcount_t ref;
71};
72
73struct memcg_vmstats_percpu;
74struct memcg1_events_percpu;
75struct memcg_vmstats;
76struct lruvec_stats_percpu;
77struct lruvec_stats;
78
79struct mem_cgroup_reclaim_iter {
80 struct mem_cgroup *position;
81 /* scan generation, increased every round-trip */
82 atomic_t generation;
83};
84
85/*
 86 * Per-node information in the memory controller.
87 */
88struct mem_cgroup_per_node {
89 /* Keep the read-only fields at the start */
90 struct mem_cgroup *memcg; /* Back pointer, we cannot */
91 /* use container_of */
92
93 struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
94 struct lruvec_stats *lruvec_stats;
95 struct shrinker_info __rcu *shrinker_info;
96
97#ifdef CONFIG_MEMCG_V1
98 /*
 99	 * Memcg-v1-only fields sit in the middle as a buffer between the
100	 * read-mostly fields above and the frequently updated fields below,
101	 * to avoid false sharing. If the v1 fields are not present, explicit
102	 * padding is needed instead.
102 */
103
104 struct rb_node tree_node; /* RB tree node */
105 unsigned long usage_in_excess;/* Set to the value by which */
106 /* the soft limit is exceeded*/
107 bool on_tree;
108#else
109 CACHELINE_PADDING(_pad1_);
110#endif
111
112 /* Fields which get updated often at the end. */
113 struct lruvec lruvec;
114 CACHELINE_PADDING(_pad2_);
115 unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
116 struct mem_cgroup_reclaim_iter iter;
117
118 /*
119	 * objcg is wiped out as a part of the objcg reparenting process.
120	 * orig_objcg preserves a pointer (and a reference) to the original
121	 * objcg until the end of the memcg's life.
122 */
123 struct obj_cgroup __rcu *objcg;
124 struct obj_cgroup *orig_objcg;
125 /* list of inherited objcgs, protected by objcg_lock */
126 struct list_head objcg_list;
127
128#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
129 /* slab stats for nmi context */
130 atomic_t slab_reclaimable;
131 atomic_t slab_unreclaimable;
132#endif
133};
134
135struct mem_cgroup_threshold {
136 struct eventfd_ctx *eventfd;
137 unsigned long threshold;
138};
139
140/* For threshold */
141struct mem_cgroup_threshold_ary {
142	/* Index (in entries[]) of the threshold just below or equal to usage */
143 int current_threshold;
144 /* Size of entries[] */
145 unsigned int size;
146 /* Array of thresholds */
147 struct mem_cgroup_threshold entries[] __counted_by(size);
148};
149
150struct mem_cgroup_thresholds {
151 /* Primary thresholds array */
152 struct mem_cgroup_threshold_ary *primary;
153 /*
154 * Spare threshold array.
155 * This is needed to make mem_cgroup_unregister_event() "never fail".
156 * It must be able to store at least primary->size - 1 entries.
157 */
158 struct mem_cgroup_threshold_ary *spare;
159};
160
161/*
162 * Remember four most recent foreign writebacks with dirty pages in this
163 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
164 * one in a given round, we're likely to catch it later if it keeps
165 * foreign-dirtying, so a fairly low count should be enough.
166 *
167 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
168 */
169#define MEMCG_CGWB_FRN_CNT 4
170
171struct memcg_cgwb_frn {
172 u64 bdi_id; /* bdi->id of the foreign inode */
173 int memcg_id; /* memcg->css.id of foreign inode */
174 u64 at; /* jiffies_64 at the time of dirtying */
175 struct wb_completion done; /* tracks in-flight foreign writebacks */
176};
177
178/*
179 * Bucket for arbitrarily byte-sized objects charged to a memory
180 * cgroup. The bucket can be reparented in one piece when the cgroup
181 * is destroyed, without having to round up the individual references
182 * of all live memory objects in the wild.
183 */
184struct obj_cgroup {
185 struct percpu_ref refcnt;
186 struct mem_cgroup *memcg;
187 atomic_t nr_charged_bytes;
188 union {
189 struct list_head list; /* protected by objcg_lock */
190 struct rcu_head rcu;
191 };
192 bool is_root;
193};
194
195/*
196 * The memory controller data structure. The memory controller controls both
197 * page cache and RSS per cgroup. We would eventually like to provide
198 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
199 * to help the administrator determine what knobs to tune.
200 */
201struct mem_cgroup {
202 struct cgroup_subsys_state css;
203
204 /* Private memcg ID. Used to ID objects that outlive the cgroup */
205 struct mem_cgroup_private_id id;
206
207 /* Accounted resources */
208 struct page_counter memory; /* Both v1 & v2 */
209
210 union {
211 struct page_counter swap; /* v2 only */
212 struct page_counter memsw; /* v1 only */
213 };
214
215 /* registered local peak watchers */
216 struct list_head memory_peaks;
217 struct list_head swap_peaks;
218 spinlock_t peaks_lock;
219
220 /* Range enforcement for interrupt charges */
221 struct work_struct high_work;
222
223#ifdef CONFIG_ZSWAP
224 unsigned long zswap_max;
225
226 /*
227 * Prevent pages from this memcg from being written back from zswap to
228 * swap, and from being swapped out on zswap store failures.
229 */
230 bool zswap_writeback;
231#endif
232
233 /* vmpressure notifications */
234 struct vmpressure vmpressure;
235
236 /*
237	 * Should the OOM killer kill all tasks in this cgroup if it has to kill one?
238 */
239 bool oom_group;
240
241 int swappiness;
242
243 /* memory.events and memory.events.local */
244 struct cgroup_file events_file;
245 struct cgroup_file events_local_file;
246
247 /* handle for "memory.swap.events" */
248 struct cgroup_file swap_events_file;
249
250 /* memory.stat */
251 struct memcg_vmstats *vmstats;
252
253 /* memory.events */
254 atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
255 atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
256
257#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
258 /* MEMCG_KMEM for nmi context */
259 atomic_t kmem_stat;
260#endif
261 /*
262	 * Hint of reclaim pressure for socket memory management. Note
263 * that this indicator should NOT be used in legacy cgroup mode
264 * where socket memory is accounted/charged separately.
265 */
266 u64 socket_pressure;
267#if BITS_PER_LONG < 64
268 seqlock_t socket_pressure_seqlock;
269#endif
270 int kmemcg_id;
271
272 struct memcg_vmstats_percpu __percpu *vmstats_percpu;
273
274#ifdef CONFIG_CGROUP_WRITEBACK
275 struct list_head cgwb_list;
276 struct wb_domain cgwb_domain;
277 struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
278#endif
279
280#ifdef CONFIG_TRANSPARENT_HUGEPAGE
281 struct deferred_split deferred_split_queue;
282#endif
283
284#ifdef CONFIG_LRU_GEN_WALKS_MMU
285 /* per-memcg mm_struct list */
286 struct lru_gen_mm_list mm_list;
287#endif
288
289#ifdef CONFIG_MEMCG_V1
290 /* Legacy consumer-oriented counters */
291 struct page_counter kmem; /* v1 only */
292 struct page_counter tcpmem; /* v1 only */
293
294 struct memcg1_events_percpu __percpu *events_percpu;
295
296 unsigned long soft_limit;
297
298 /* protected by memcg_oom_lock */
299 bool oom_lock;
300 int under_oom;
301
302 /* OOM-Killer disable */
303 int oom_kill_disable;
304
305 /* protect arrays of thresholds */
306 struct mutex thresholds_lock;
307
308 /* thresholds for memory usage. RCU-protected */
309 struct mem_cgroup_thresholds thresholds;
310
311 /* thresholds for mem+swap usage. RCU-protected */
312 struct mem_cgroup_thresholds memsw_thresholds;
313
314 /* For oom notifier event fd */
315 struct list_head oom_notify;
316
317 /* Legacy tcp memory accounting */
318 bool tcpmem_active;
319 int tcpmem_pressure;
320
321	/* List of events which userspace wants to receive */
322 struct list_head event_list;
323 spinlock_t event_list_lock;
324#endif /* CONFIG_MEMCG_V1 */
325
326 struct mem_cgroup_per_node *nodeinfo[];
327};
328
329/*
330 * Size of the first charge trial.
331 * TODO: it may be necessary to use larger numbers on big machines, or to size
332 * this dynamically based on the workload.
333 */
334#define MEMCG_CHARGE_BATCH 64U
335
336extern struct mem_cgroup *root_mem_cgroup;
337
338enum page_memcg_data_flags {
339	/* page->memcg_data is a pointer to a slabobj_ext vector */
340 MEMCG_DATA_OBJEXTS = (1UL << 0),
341 /* page has been accounted as a non-slab kernel page */
342 MEMCG_DATA_KMEM = (1UL << 1),
343 /* the next bit after the last actual flag */
344 __NR_MEMCG_DATA_FLAGS = (1UL << 2),
345};
346
347#define __OBJEXTS_ALLOC_FAIL MEMCG_DATA_OBJEXTS
348#define __FIRST_OBJEXT_FLAG __NR_MEMCG_DATA_FLAGS
349
350#else /* CONFIG_MEMCG */
351
352#define __OBJEXTS_ALLOC_FAIL (1UL << 0)
353#define __FIRST_OBJEXT_FLAG (1UL << 0)
354
355#endif /* CONFIG_MEMCG */
356
357enum objext_flags {
358 /*
359	 * Use bit 0 with zero other bits to signal that the slabobj_ext vector
360 * failed to allocate. The same bit 0 with valid upper bits means
361 * MEMCG_DATA_OBJEXTS.
362 */
363 OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL,
364 __OBJEXTS_FLAG_UNUSED = __FIRST_OBJEXT_FLAG,
365 /* the next bit after the last actual flag */
366 __NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
367};
368
369#define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)
370
371#ifdef CONFIG_MEMCG
372/*
373 * After initialization, objcg->memcg always points at a valid memcg,
374 * but it can be atomically swapped to the parent memcg.
375 *
376 * The caller must ensure that the returned memcg won't be released.
377 */
378static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
379{
380 lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
381 return READ_ONCE(objcg->memcg);
382}
383
384/*
385 * folio_objcg - get the object cgroup associated with a folio.
386 * @folio: Pointer to the folio.
387 *
388 * Returns a pointer to the object cgroup associated with the folio,
389 * or NULL. This function assumes that the folio is known to have a
390 * proper object cgroup pointer.
391 */
392static inline struct obj_cgroup *folio_objcg(struct folio *folio)
393{
394 unsigned long memcg_data = folio->memcg_data;
395
396 VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
397 VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
398
399 return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
400}
401
402/*
403 * folio_memcg - Get the memory cgroup associated with a folio.
404 * @folio: Pointer to the folio.
405 *
406 * Returns a pointer to the memory cgroup associated with the folio,
407 * or NULL. This function assumes that the folio is known to have a
408 * proper memory cgroup pointer. It's not safe to call this function
409 * against some type of folios, e.g. slab folios or ex-slab folios.
410 *
411 * For a folio any of the following ensures folio and objcg binding stability:
412 *
413 * - the folio lock
414 * - LRU isolation
415 * - exclusive reference
416 *
417 * Based on the stable binding of folio and objcg, for a folio any of the
418 * following ensures folio and memcg binding stability:
419 *
420 * - cgroup_mutex
421 * - the lruvec lock
422 *
423 * If the caller only wants to ensure that the memcg page counters are
424 * updated correctly, ensuring the stability of the folio/objcg binding
425 * is sufficient.
426 *
427 * Note: The caller should hold an rcu read lock or cgroup_mutex to protect
428 * memcg associated with a folio from being released.
429 */
430static inline struct mem_cgroup *folio_memcg(struct folio *folio)
431{
432 struct obj_cgroup *objcg = folio_objcg(folio);
433
434 return objcg ? obj_cgroup_memcg(objcg) : NULL;
435}
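
/*
 * Illustrative sketch (not part of this header): a hypothetical helper that
 * only peeks at the folio's memcg. The folio lock (assumed held by the
 * caller) keeps the folio/objcg binding stable, and rcu_read_lock() keeps
 * the memcg from being released while it is examined.
 *
 *	static bool folio_in_hierarchy(struct folio *folio, struct mem_cgroup *memcg)
 *	{
 *		struct mem_cgroup *folio_memcg_ptr;
 *		bool ret = false;
 *
 *		rcu_read_lock();
 *		folio_memcg_ptr = folio_memcg(folio);
 *		if (folio_memcg_ptr)
 *			ret = mem_cgroup_is_descendant(folio_memcg_ptr, memcg);
 *		rcu_read_unlock();
 *		return ret;
 *	}
 */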
436
437/*
438 * folio_memcg_charged - Check whether a folio is charged to a memory cgroup.
439 * @folio: Pointer to the folio.
440 *
441 * Returns true if folio is charged to a memory cgroup, otherwise returns false.
442 */
443static inline bool folio_memcg_charged(struct folio *folio)
444{
445 return folio->memcg_data != 0;
446}
447
448/*
449 * folio_memcg_check - Get the memory cgroup associated with a folio.
450 * @folio: Pointer to the folio.
451 *
452 * Returns a pointer to the memory cgroup associated with the folio,
453 * or NULL. Unlike folio_memcg(), this function can take any folio
454 * as an argument. It must be used when it is not known whether a folio
455 * has an associated memory cgroup pointer, an object cgroup pointer, or
456 * an object cgroups vector.
457 *
458 * For the folio/objcg and folio/memcg binding rules, see folio_memcg().
459 *
460 * A caller should hold an rcu read lock to protect the memcg associated
461 * with the folio from being released.
462 */
463static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
464{
465 /*
466 * Because folio->memcg_data might be changed asynchronously
467 * for slabs, READ_ONCE() should be used here.
468 */
469 unsigned long memcg_data = READ_ONCE(folio->memcg_data);
470 struct obj_cgroup *objcg;
471
472 if (memcg_data & MEMCG_DATA_OBJEXTS)
473 return NULL;
474
475 objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
476
477 return objcg ? obj_cgroup_memcg(objcg) : NULL;
478}
479
480static inline struct mem_cgroup *page_memcg_check(struct page *page)
481{
482 if (PageTail(page))
483 return NULL;
484 return folio_memcg_check((struct folio *)page);
485}
486
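/*
 * get_mem_cgroup_from_objcg - get a css reference on the memcg of an objcg.
 *
 * objcg->memcg may be swapped to the parent memcg concurrently (reparenting),
 * so if css_tryget() fails the lookup is retried against whatever memcg the
 * objcg points at by then.
 */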
487static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
488{
489 struct mem_cgroup *memcg;
490
491 rcu_read_lock();
492retry:
493 memcg = obj_cgroup_memcg(objcg);
494 if (unlikely(!css_tryget(&memcg->css)))
495 goto retry;
496 rcu_read_unlock();
497
498 return memcg;
499}
500
501/*
502 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
503 * @folio: Pointer to the folio.
504 *
505 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
506 * that the folio has an associated memory cgroup. It's not safe to call
507 * this function against some types of folios, e.g. slab folios.
508 */
509static inline bool folio_memcg_kmem(struct folio *folio)
510{
511 VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
512 VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio);
513 return folio->memcg_data & MEMCG_DATA_KMEM;
514}
515
516static inline bool PageMemcgKmem(struct page *page)
517{
518 return folio_memcg_kmem(page_folio(page));
519}
520
521static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
522{
523 return (memcg == root_mem_cgroup);
524}
525
526static inline bool obj_cgroup_is_root(const struct obj_cgroup *objcg)
527{
528 return objcg->is_root;
529}
530
531static inline bool mem_cgroup_disabled(void)
532{
533 return !cgroup_subsys_enabled(memory_cgrp_subsys);
534}
535
536static inline void mem_cgroup_protection(struct mem_cgroup *root,
537 struct mem_cgroup *memcg,
538 unsigned long *min,
539 unsigned long *low,
540 unsigned long *usage)
541{
542 *min = *low = *usage = 0;
543
544 if (mem_cgroup_disabled())
545 return;
546
547 *usage = page_counter_read(&memcg->memory);
548 /*
549 * There is no reclaim protection applied to a targeted reclaim.
550	 * This case is special-cased here because
551	 * mem_cgroup_calculate_protection() is not robust enough to keep
552	 * the protection invariant of the calculated effective values for
553	 * parallel reclaimers with different reclaim targets. This is
554 * especially a problem for tail memcgs (as they have pages on LRU)
555 * which would want to have effective values 0 for targeted reclaim
556 * but a different value for external reclaim.
557 *
558 * Example
559 * Let's have global and A's reclaim in parallel:
560 * |
561 * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
562 * |\
563 * | C (low = 1G, usage = 2.5G)
564 * B (low = 1G, usage = 0.5G)
565 *
566 * For the global reclaim
567 * A.elow = A.low
568 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
569 * C.elow = min(C.usage, C.low)
570 *
571 * With the effective values resetting we have A reclaim
572 * A.elow = 0
573 * B.elow = B.low
574 * C.elow = C.low
575 *
576 * If the global reclaim races with A's reclaim then
577	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
578 * is possible and reclaiming B would be violating the protection.
579 *
580 */
581 if (root == memcg)
582 return;
583
584 *min = READ_ONCE(memcg->memory.emin);
585 *low = READ_ONCE(memcg->memory.elow);
586}
587
588void mem_cgroup_calculate_protection(struct mem_cgroup *root,
589 struct mem_cgroup *memcg);
590
591static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
592 struct mem_cgroup *memcg)
593{
594 /*
595 * The root memcg doesn't account charges, and doesn't support
596 * protection. The target memcg's protection is ignored, see
597 * mem_cgroup_calculate_protection() and mem_cgroup_protection()
598 */
599 return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
600 memcg == target;
601}
602
603static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
604 struct mem_cgroup *memcg)
605{
606 if (mem_cgroup_unprotected(target, memcg))
607 return false;
608
609 return READ_ONCE(memcg->memory.elow) >=
610 page_counter_read(&memcg->memory);
611}
612
613static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
614 struct mem_cgroup *memcg)
615{
616 if (mem_cgroup_unprotected(target, memcg))
617 return false;
618
619 return READ_ONCE(memcg->memory.emin) >=
620 page_counter_read(&memcg->memory);
621}
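
/*
 * Illustrative sketch (not part of this header): roughly how a reclaimer is
 * expected to consume the protection helpers; the skip/deprioritize actions
 * are placeholders.
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	if (mem_cgroup_below_min(target_memcg, memcg)) {
 *		// hard protection: skip this memcg entirely
 *	} else if (mem_cgroup_below_low(target_memcg, memcg)) {
 *		// soft protection: usually skip; if reclaim proceeds anyway,
 *		// a MEMCG_LOW event is expected to be recorded
 *	}
 */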
622
623int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
624
625/**
626 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
627 * @folio: Folio to charge.
628 * @mm: mm context of the allocating task.
629 * @gfp: Reclaim mode.
630 *
631 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
632 * pages according to @gfp if necessary. If @mm is NULL, try to
633 * charge to the active memcg.
634 *
635 * Do not use this for folios allocated for swapin.
636 *
637 * Return: 0 on success. Otherwise, an error code is returned.
638 */
639static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
640 gfp_t gfp)
641{
642 if (mem_cgroup_disabled())
643 return 0;
644 return __mem_cgroup_charge(folio, mm, gfp);
645}
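
/*
 * Illustrative sketch (not part of this header): charging a freshly
 * allocated folio before it becomes visible; the surrounding allocation
 * and insertion steps are hypothetical. Freeing a fully charged folio
 * later uncharges it via the normal folio release path.
 *
 *	folio = folio_alloc(GFP_KERNEL, 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, current->mm, GFP_KERNEL)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	// ... insert the folio into the page cache / map it ...
 */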
646
647int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp);
648
649int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
650 gfp_t gfp, swp_entry_t entry);
651
652void __mem_cgroup_uncharge(struct folio *folio);
653
654/**
655 * mem_cgroup_uncharge - Uncharge a folio.
656 * @folio: Folio to uncharge.
657 *
658 * Uncharge a folio previously charged with mem_cgroup_charge().
659 */
660static inline void mem_cgroup_uncharge(struct folio *folio)
661{
662 if (mem_cgroup_disabled())
663 return;
664 __mem_cgroup_uncharge(folio);
665}
666
667void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
668static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
669{
670 if (mem_cgroup_disabled())
671 return;
672 __mem_cgroup_uncharge_folios(folios);
673}
674
675void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
676void mem_cgroup_migrate(struct folio *old, struct folio *new);
677
678/**
679 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
680 * @memcg: memcg of the wanted lruvec
681 * @pgdat: pglist_data
682 *
683 * Returns the lru list vector holding pages for a given @memcg &
684 * @pgdat combination. This can be the node lruvec, if the memory
685 * controller is disabled.
686 */
687static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
688 struct pglist_data *pgdat)
689{
690 struct mem_cgroup_per_node *mz;
691 struct lruvec *lruvec;
692
693 if (mem_cgroup_disabled()) {
694 lruvec = &pgdat->__lruvec;
695 goto out;
696 }
697
698 if (!memcg)
699 memcg = root_mem_cgroup;
700
701 mz = memcg->nodeinfo[pgdat->node_id];
702 lruvec = &mz->lruvec;
703out:
704 /*
705 * Since a node can be onlined after the mem_cgroup was created,
706 * we have to be prepared to initialize lruvec->pgdat here;
707 * and if offlined then reonlined, we need to reinitialize it.
708 */
709 if (unlikely(lruvec->pgdat != pgdat))
710 lruvec->pgdat = pgdat;
711 return lruvec;
712}
713
714/**
715 * folio_lruvec - return lruvec for isolating/putting an LRU folio
716 * @folio: Pointer to the folio.
717 *
718 * Call with rcu_read_lock() held to ensure the lifetime of the returned lruvec.
719 * Note that this alone will NOT guarantee the stability of the folio->lruvec
720 * association; the folio can be reparented to an ancestor if this races with
721 * cgroup deletion.
722 *
723 * Use folio_lruvec_lock() to ensure both lifetime and stability of the binding.
724 * Once a lruvec is locked, folio_lruvec() can be called on other folios, and
725 * their binding is stable if the returned lruvec matches the one the caller has
726 * locked. Useful for lock batching.
727 */
728static inline struct lruvec *folio_lruvec(struct folio *folio)
729{
730 struct mem_cgroup *memcg = folio_memcg(folio);
731
732 VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
733 return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
734}
735
736struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
737
738struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
739
740struct mem_cgroup *get_mem_cgroup_from_current(void);
741
742struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
743
744struct lruvec *folio_lruvec_lock(struct folio *folio);
745struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
746struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
747 unsigned long *flags);
748
749static inline
750struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
751 return css ? container_of(css, struct mem_cgroup, css) : NULL;
752}
753
754static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
755{
756 if (obj_cgroup_is_root(objcg))
757 return true;
758 return percpu_ref_tryget(&objcg->refcnt);
759}
760
761static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
762 unsigned long nr)
763{
764 if (!obj_cgroup_is_root(objcg))
765 percpu_ref_get_many(&objcg->refcnt, nr);
766}
767
768static inline void obj_cgroup_get(struct obj_cgroup *objcg)
769{
770 obj_cgroup_get_many(objcg, 1);
771}
772
773static inline void obj_cgroup_put(struct obj_cgroup *objcg)
774{
775 if (objcg && !obj_cgroup_is_root(objcg))
776 percpu_ref_put(&objcg->refcnt);
777}
778
779static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
780{
781 return !memcg || css_tryget(&memcg->css);
782}
783
784static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
785{
786 return !memcg || css_tryget_online(&memcg->css);
787}
788
789static inline void mem_cgroup_put(struct mem_cgroup *memcg)
790{
791 if (memcg)
792 css_put(&memcg->css);
793}
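
/*
 * Illustrative sketch (not part of this header): the usual pairing when a
 * memcg must stay alive across a sleeping section.
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
 *
 *	// ... may sleep while holding the css reference ...
 *	mem_cgroup_put(memcg);
 */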
794
795#define mem_cgroup_from_counter(counter, member) \
796 container_of(counter, struct mem_cgroup, member)
797
798struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
799 struct mem_cgroup *,
800 struct mem_cgroup_reclaim_cookie *);
801void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
802void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
803 int (*)(struct task_struct *, void *), void *arg);
804
805static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
806{
807 if (mem_cgroup_disabled())
808 return 0;
809
810 return memcg->id.id;
811}
812struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id);
813
814static inline u64 mem_cgroup_id(struct mem_cgroup *memcg)
815{
816 return memcg ? cgroup_id(memcg->css.cgroup) : 0;
817}
818
819struct mem_cgroup *mem_cgroup_get_from_id(u64 id);
820
821static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
822{
823 return mem_cgroup_from_css(seq_css(m));
824}
825
826static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
827{
828 struct mem_cgroup_per_node *mz;
829
830 if (mem_cgroup_disabled())
831 return NULL;
832
833 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
834 return mz->memcg;
835}
836
837/**
838 * parent_mem_cgroup - find the accounting parent of a memcg
839 * @memcg: memcg whose parent to find
840 *
841 * Returns the parent memcg, or NULL if this is the root.
842 */
843static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
844{
845 return mem_cgroup_from_css(memcg->css.parent);
846}
847
848static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
849 struct mem_cgroup *root)
850{
851 if (root == memcg)
852 return true;
853 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
854}
855
856static inline bool mm_match_cgroup(struct mm_struct *mm,
857 struct mem_cgroup *memcg)
858{
859 struct mem_cgroup *task_memcg;
860 bool match = false;
861
862 rcu_read_lock();
863 task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
864 if (task_memcg)
865 match = mem_cgroup_is_descendant(task_memcg, memcg);
866 rcu_read_unlock();
867 return match;
868}
869
870struct cgroup_subsys_state *get_mem_cgroup_css_from_folio(struct folio *folio);
871ino_t page_cgroup_ino(struct page *page);
872
873static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
874{
875 if (mem_cgroup_disabled())
876 return true;
877 return css_is_online(&memcg->css);
878}
879
880void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
881 int zid, long nr_pages);
882
883static inline
884unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
885 enum lru_list lru, int zone_idx)
886{
887 struct mem_cgroup_per_node *mz;
888
889 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
890 return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
891}
892
893void __mem_cgroup_handle_over_high(gfp_t gfp_mask);
894
895static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
896{
897 if (unlikely(current->memcg_nr_pages_over_high))
898 __mem_cgroup_handle_over_high(gfp_mask);
899}
900
901unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
902
903void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
904 struct task_struct *p);
905
906void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
907
908struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
909 struct mem_cgroup *oom_domain);
910void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
911
912/* idx can be of type enum memcg_stat_item or node_stat_item */
913void mod_memcg_state(struct mem_cgroup *memcg,
914 enum memcg_stat_item idx, int val);
915
916static inline void mod_memcg_page_state(struct page *page,
917 enum memcg_stat_item idx, int val)
918{
919 struct mem_cgroup *memcg;
920
921 if (mem_cgroup_disabled())
922 return;
923
924 rcu_read_lock();
925 memcg = folio_memcg(page_folio(page));
926 if (memcg)
927 mod_memcg_state(memcg, idx, val);
928 rcu_read_unlock();
929}
930
931unsigned long memcg_events(struct mem_cgroup *memcg, int event);
932unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
933unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
934bool memcg_stat_item_valid(int idx);
935bool memcg_vm_event_item_valid(enum vm_event_item idx);
936unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
937unsigned long lruvec_page_state_local(struct lruvec *lruvec,
938 enum node_stat_item idx);
939
940void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
941void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
942
943void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
944
945void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
946 unsigned long count);
947
948static inline void count_memcg_folio_events(struct folio *folio,
949 enum vm_event_item idx, unsigned long nr)
950{
951 struct mem_cgroup *memcg;
952
953 if (!folio_memcg_charged(folio))
954 return;
955
956 rcu_read_lock();
957 memcg = folio_memcg(folio);
958 count_memcg_events(memcg, idx, nr);
959 rcu_read_unlock();
960}
961
962static inline void count_memcg_events_mm(struct mm_struct *mm,
963 enum vm_event_item idx, unsigned long count)
964{
965 struct mem_cgroup *memcg;
966
967 if (mem_cgroup_disabled())
968 return;
969
970 rcu_read_lock();
971 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
972 if (likely(memcg))
973 count_memcg_events(memcg, idx, count);
974 rcu_read_unlock();
975}
976
977static inline void count_memcg_event_mm(struct mm_struct *mm,
978 enum vm_event_item idx)
979{
980 count_memcg_events_mm(mm, idx, 1);
981}
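
/*
 * Illustrative sketch (not part of this header): a fault path attributing a
 * major fault to the faulting mm's memcg; "vma" is a hypothetical
 * vm_area_struct pointer available at that point.
 *
 *	count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
 */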
982
983void __memcg_memory_event(struct mem_cgroup *memcg,
984 enum memcg_memory_event event, bool allow_spinning);
985
986static inline void memcg_memory_event(struct mem_cgroup *memcg,
987 enum memcg_memory_event event)
988{
989 __memcg_memory_event(memcg, event, true);
990}
991
992static inline void memcg_memory_event_mm(struct mm_struct *mm,
993 enum memcg_memory_event event)
994{
995 struct mem_cgroup *memcg;
996
997 if (mem_cgroup_disabled())
998 return;
999
1000 rcu_read_lock();
1001 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1002 if (likely(memcg))
1003 memcg_memory_event(memcg, event);
1004 rcu_read_unlock();
1005}
1006
1007void split_page_memcg(struct page *first, unsigned order);
1008void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
1009 unsigned new_order);
1010
1011static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
1012{
1013 struct mem_cgroup *memcg;
1014 u64 id;
1015
1016 if (mem_cgroup_disabled())
1017 return 0;
1018
1019 rcu_read_lock();
1020 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1021 if (!memcg)
1022 memcg = root_mem_cgroup;
1023 id = cgroup_id(memcg->css.cgroup);
1024 rcu_read_unlock();
1025 return id;
1026}
1027
1028void mem_cgroup_flush_workqueue(void);
1029
1030extern int mem_cgroup_init(void);
1031#else /* CONFIG_MEMCG */
1032
1033#define MEM_CGROUP_ID_SHIFT 0
1034
1035#define root_mem_cgroup (NULL)
1036
1037static inline struct mem_cgroup *folio_memcg(struct folio *folio)
1038{
1039 return NULL;
1040}
1041
1042static inline bool folio_memcg_charged(struct folio *folio)
1043{
1044 return false;
1045}
1046
1047static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
1048{
1049 return NULL;
1050}
1051
1052static inline struct mem_cgroup *page_memcg_check(struct page *page)
1053{
1054 return NULL;
1055}
1056
1057static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
1058{
1059 return NULL;
1060}
1061
1062static inline bool folio_memcg_kmem(struct folio *folio)
1063{
1064 return false;
1065}
1066
1067static inline bool PageMemcgKmem(struct page *page)
1068{
1069 return false;
1070}
1071
1072static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1073{
1074 return true;
1075}
1076
1077static inline bool obj_cgroup_is_root(const struct obj_cgroup *objcg)
1078{
1079 return true;
1080}
1081
1082static inline bool mem_cgroup_disabled(void)
1083{
1084 return true;
1085}
1086
1087static inline void memcg_memory_event(struct mem_cgroup *memcg,
1088 enum memcg_memory_event event)
1089{
1090}
1091
1092static inline void memcg_memory_event_mm(struct mm_struct *mm,
1093 enum memcg_memory_event event)
1094{
1095}
1096
1097static inline void mem_cgroup_protection(struct mem_cgroup *root,
1098 struct mem_cgroup *memcg,
1099 unsigned long *min,
1100 unsigned long *low,
1101 unsigned long *usage)
1102{
1103 *min = *low = *usage = 0;
1104}
1105
1106static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
1107 struct mem_cgroup *memcg)
1108{
1109}
1110
1111static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
1112 struct mem_cgroup *memcg)
1113{
1114 return true;
1115}
1116static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
1117 struct mem_cgroup *memcg)
1118{
1119 return false;
1120}
1121
1122static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
1123 struct mem_cgroup *memcg)
1124{
1125 return false;
1126}
1127
1128static inline int mem_cgroup_charge(struct folio *folio,
1129 struct mm_struct *mm, gfp_t gfp)
1130{
1131 return 0;
1132}
1133
1134static inline int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
1135{
1136 return 0;
1137}
1138
1139static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
1140 struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
1141{
1142 return 0;
1143}
1144
1145static inline void mem_cgroup_uncharge(struct folio *folio)
1146{
1147}
1148
1149static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
1150{
1151}
1152
1153static inline void mem_cgroup_replace_folio(struct folio *old,
1154 struct folio *new)
1155{
1156}
1157
1158static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
1159{
1160}
1161
1162static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
1163 struct pglist_data *pgdat)
1164{
1165 return &pgdat->__lruvec;
1166}
1167
1168static inline struct lruvec *folio_lruvec(struct folio *folio)
1169{
1170 struct pglist_data *pgdat = folio_pgdat(folio);
1171 return &pgdat->__lruvec;
1172}
1173
1174static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1175{
1176 return NULL;
1177}
1178
1179static inline bool mm_match_cgroup(struct mm_struct *mm,
1180 struct mem_cgroup *memcg)
1181{
1182 return true;
1183}
1184
1185static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1186{
1187 return NULL;
1188}
1189
1190static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
1191{
1192 return NULL;
1193}
1194
1195static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
1196{
1197 return NULL;
1198}
1199
1200static inline
1201struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
1202{
1203 return NULL;
1204}
1205
1206static inline void obj_cgroup_get(struct obj_cgroup *objcg)
1207{
1208}
1209
1210static inline void obj_cgroup_put(struct obj_cgroup *objcg)
1211{
1212}
1213
1214static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
1215{
1216 return true;
1217}
1218
1219static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
1220{
1221 return true;
1222}
1223
1224static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1225{
1226}
1227
1228static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
1229{
1230 struct pglist_data *pgdat = folio_pgdat(folio);
1231
1232 rcu_read_lock();
1233 spin_lock(&pgdat->__lruvec.lru_lock);
1234 return &pgdat->__lruvec;
1235}
1236
1237static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1238{
1239 struct pglist_data *pgdat = folio_pgdat(folio);
1240
1241 rcu_read_lock();
1242 spin_lock_irq(&pgdat->__lruvec.lru_lock);
1243 return &pgdat->__lruvec;
1244}
1245
1246static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1247 unsigned long *flagsp)
1248{
1249 struct pglist_data *pgdat = folio_pgdat(folio);
1250
1251 rcu_read_lock();
1252 spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
1253 return &pgdat->__lruvec;
1254}
1255
1256static inline struct mem_cgroup *
1257mem_cgroup_iter(struct mem_cgroup *root,
1258 struct mem_cgroup *prev,
1259 struct mem_cgroup_reclaim_cookie *reclaim)
1260{
1261 return NULL;
1262}
1263
1264static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
1265 struct mem_cgroup *prev)
1266{
1267}
1268
1269static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1270 int (*fn)(struct task_struct *, void *), void *arg)
1271{
1272}
1273
1274static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
1275{
1276 return 0;
1277}
1278
1279static inline struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id)
1280{
1281 WARN_ON_ONCE(id);
1282 /* XXX: This should always return root_mem_cgroup */
1283 return NULL;
1284}
1285
1286static inline u64 mem_cgroup_id(struct mem_cgroup *memcg)
1287{
1288 return 0;
1289}
1290
1291static inline struct mem_cgroup *mem_cgroup_get_from_id(u64 id)
1292{
1293 return NULL;
1294}
1295
1296static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
1297{
1298 return NULL;
1299}
1300
1301static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
1302{
1303 return NULL;
1304}
1305
1306static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1307{
1308 return true;
1309}
1310
1311static inline
1312unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
1313 enum lru_list lru, int zone_idx)
1314{
1315 return 0;
1316}
1317
1318static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1319{
1320 return 0;
1321}
1322
1323static inline void
1324mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1325{
1326}
1327
1328static inline void
1329mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1330{
1331}
1332
1333static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
1334{
1335}
1336
1337static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1338 struct task_struct *victim, struct mem_cgroup *oom_domain)
1339{
1340 return NULL;
1341}
1342
1343static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1344{
1345}
1346
1347static inline void mod_memcg_state(struct mem_cgroup *memcg,
1348 enum memcg_stat_item idx,
1349 int nr)
1350{
1351}
1352
1353static inline void mod_memcg_page_state(struct page *page,
1354 enum memcg_stat_item idx, int val)
1355{
1356}
1357
1358static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1359{
1360 return 0;
1361}
1362
1363static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
1364{
1365 return 0;
1366}
1367
1368static inline bool memcg_stat_item_valid(int idx)
1369{
1370 return false;
1371}
1372
1373static inline bool memcg_vm_event_item_valid(enum vm_event_item idx)
1374{
1375 return false;
1376}
1377
1378static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1379 enum node_stat_item idx)
1380{
1381 return node_page_state(lruvec_pgdat(lruvec), idx);
1382}
1383
1384static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1385 enum node_stat_item idx)
1386{
1387 return node_page_state(lruvec_pgdat(lruvec), idx);
1388}
1389
1390static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
1391{
1392}
1393
1394static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
1395{
1396}
1397
1398static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1399 int val)
1400{
1401 struct page *page = virt_to_head_page(p);
1402
1403 mod_node_page_state(page_pgdat(page), idx, val);
1404}
1405
1406static inline void count_memcg_events(struct mem_cgroup *memcg,
1407 enum vm_event_item idx,
1408 unsigned long count)
1409{
1410}
1411
1412static inline void count_memcg_folio_events(struct folio *folio,
1413 enum vm_event_item idx, unsigned long nr)
1414{
1415}
1416
1417static inline void count_memcg_events_mm(struct mm_struct *mm,
1418 enum vm_event_item idx, unsigned long count)
1419{
1420}
1421
1422static inline
1423void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1424{
1425}
1426
1427static inline void split_page_memcg(struct page *first, unsigned order)
1428{
1429}
1430
1431static inline void folio_split_memcg_refs(struct folio *folio,
1432 unsigned old_order, unsigned new_order)
1433{
1434}
1435
1436static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
1437{
1438 return 0;
1439}
1440
1441static inline void mem_cgroup_flush_workqueue(void) { }
1442
1443static inline int mem_cgroup_init(void) { return 0; }
1444#endif /* CONFIG_MEMCG */
1445
1446/*
1447 * Extended information for slab objects stored as an array in page->memcg_data
1448 * if MEMCG_DATA_OBJEXTS is set.
1449 */
1450struct slabobj_ext {
1451#ifdef CONFIG_MEMCG
1452 struct obj_cgroup *objcg;
1453#endif
1454#ifdef CONFIG_MEM_ALLOC_PROFILING
1455 union codetag_ref ref;
1456#endif
1457} __aligned(8);
1458
1459static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1460{
1461 struct mem_cgroup *memcg;
1462
1463 memcg = lruvec_memcg(lruvec);
1464 if (!memcg)
1465 return NULL;
1466 memcg = parent_mem_cgroup(memcg);
1467 if (!memcg)
1468 return NULL;
1469 return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1470}
1471
1472static inline void lruvec_lock_irq(struct lruvec *lruvec)
1473{
1474 rcu_read_lock();
1475 spin_lock_irq(&lruvec->lru_lock);
1476}
1477
1478static inline void lruvec_unlock(struct lruvec *lruvec)
1479{
1480 spin_unlock(&lruvec->lru_lock);
1481 rcu_read_unlock();
1482}
1483
1484static inline void lruvec_unlock_irq(struct lruvec *lruvec)
1485{
1486 spin_unlock_irq(&lruvec->lru_lock);
1487 rcu_read_unlock();
1488}
1489
1490static inline void lruvec_unlock_irqrestore(struct lruvec *lruvec, unsigned long flags)
1491{
1492 spin_unlock_irqrestore(&lruvec->lru_lock, flags);
1493 rcu_read_unlock();
1494}
1495
1496/* Test requires a stable folio->memcg binding, see folio_memcg() */
1497static inline bool folio_matches_lruvec(struct folio *folio,
1498 struct lruvec *lruvec)
1499{
1500 return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
1501 lruvec_memcg(lruvec) == folio_memcg(folio);
1502}
1503
1504/* Don't lock again iff the folio's lruvec is already locked */
1505static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
1506 struct lruvec *locked_lruvec)
1507{
1508 if (locked_lruvec) {
1509 if (folio_matches_lruvec(folio, locked_lruvec))
1510 return locked_lruvec;
1511
1512 lruvec_unlock_irq(locked_lruvec);
1513 }
1514
1515 return folio_lruvec_lock_irq(folio);
1516}
1517
1518/* Don't lock again iff the folio's lruvec is already locked */
1519static inline void folio_lruvec_relock_irqsave(struct folio *folio,
1520 struct lruvec **lruvecp, unsigned long *flags)
1521{
1522 if (*lruvecp) {
1523 if (folio_matches_lruvec(folio, *lruvecp))
1524 return;
1525
1526 lruvec_unlock_irqrestore(*lruvecp, *flags);
1527 }
1528
1529 *lruvecp = folio_lruvec_lock_irqsave(folio, flags);
1530}
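
/*
 * Illustrative sketch (not part of this header): batched LRU work over a
 * folio_batch, relocking only when a folio belongs to a different lruvec
 * than the one currently held. do_lru_work() is hypothetical.
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	int i;
 *
 *	for (i = 0; i < folio_batch_count(fbatch); i++) {
 *		struct folio *folio = fbatch->folios[i];
 *
 *		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 *		do_lru_work(lruvec, folio);
 *	}
 *	if (lruvec)
 *		lruvec_unlock_irqrestore(lruvec, flags);
 */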
1531
1532#ifdef CONFIG_CGROUP_WRITEBACK
1533
1534struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1535void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1536 unsigned long *pheadroom, unsigned long *pdirty,
1537 unsigned long *pwriteback);
1538
1539void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
1540 struct bdi_writeback *wb);
1541
1542static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1543 struct bdi_writeback *wb)
1544{
1545 struct mem_cgroup *memcg;
1546
1547 if (mem_cgroup_disabled())
1548 return;
1549
1550 if (!folio_memcg_charged(folio))
1551 return;
1552
1553 rcu_read_lock();
1554 memcg = folio_memcg(folio);
1555 if (unlikely(&memcg->css != wb->memcg_css))
1556 mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
1557 rcu_read_unlock();
1558}
1559
1560void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1561
1562#else /* CONFIG_CGROUP_WRITEBACK */
1563
1564static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1565{
1566 return NULL;
1567}
1568
1569static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1570 unsigned long *pfilepages,
1571 unsigned long *pheadroom,
1572 unsigned long *pdirty,
1573 unsigned long *pwriteback)
1574{
1575}
1576
1577static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1578 struct bdi_writeback *wb)
1579{
1580}
1581
1582static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
1583{
1584}
1585
1586#endif /* CONFIG_CGROUP_WRITEBACK */
1587
1588struct sock;
1589#ifdef CONFIG_MEMCG
1590extern struct static_key_false memcg_sockets_enabled_key;
1591#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1592
1593void mem_cgroup_sk_alloc(struct sock *sk);
1594void mem_cgroup_sk_free(struct sock *sk);
1595void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk);
1596bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
1597 gfp_t gfp_mask);
1598void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages);
1599
1600#if BITS_PER_LONG < 64
1601static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
1602{
1603 u64 val = get_jiffies_64() + HZ;
1604 unsigned long flags;
1605
1606 write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags);
1607 memcg->socket_pressure = val;
1608 write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags);
1609}
1610
1611static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
1612{
1613 unsigned int seq;
1614 u64 val;
1615
1616 do {
1617 seq = read_seqbegin(&memcg->socket_pressure_seqlock);
1618 val = memcg->socket_pressure;
1619 } while (read_seqretry(&memcg->socket_pressure_seqlock, seq));
1620
1621 return val;
1622}
1623#else
1624static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
1625{
1626 WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
1627}
1628
1629static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
1630{
1631 return READ_ONCE(memcg->socket_pressure);
1632}
1633#endif
1634
1635int alloc_shrinker_info(struct mem_cgroup *memcg);
1636void free_shrinker_info(struct mem_cgroup *memcg);
1637void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
1638void reparent_shrinker_deferred(struct mem_cgroup *memcg);
1639
1640static inline int shrinker_id(struct shrinker *shrinker)
1641{
1642 return shrinker->id;
1643}
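
/*
 * Illustrative sketch (not part of this header): a memcg-aware shrinker
 * marks itself as having work for a given memcg and node (for example after
 * adding an object to a memcg-aware LRU), so that per-memcg slab reclaim
 * will invoke it. "my_shrinker" is hypothetical.
 *
 *	set_shrinker_bit(memcg, nid, shrinker_id(my_shrinker));
 */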
1644#else
1645#define mem_cgroup_sockets_enabled 0
1646
1647static inline void mem_cgroup_sk_alloc(struct sock *sk)
1648{
1649}
1650
1651static inline void mem_cgroup_sk_free(struct sock *sk)
1652{
1653}
1654
1655static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
1656{
1657}
1658
1659static inline bool mem_cgroup_sk_charge(const struct sock *sk,
1660 unsigned int nr_pages,
1661 gfp_t gfp_mask)
1662{
1663 return false;
1664}
1665
1666static inline void mem_cgroup_sk_uncharge(const struct sock *sk,
1667 unsigned int nr_pages)
1668{
1669}
1670
1671static inline void set_shrinker_bit(struct mem_cgroup *memcg,
1672 int nid, int shrinker_id)
1673{
1674}
1675
1676static inline int shrinker_id(struct shrinker *shrinker)
1677{
1678 return -1;
1679}
1680#endif
1681
1682#ifdef CONFIG_MEMCG
1683bool mem_cgroup_kmem_disabled(void);
1684int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1685void __memcg_kmem_uncharge_page(struct page *page, int order);
1686
1687/*
1688 * The returned objcg pointer is safe to use without additional
1689 * protection within a scope. The scope is defined either by
1690 * the current task (similar to the "current" global variable)
1691 * or by a pair of set_active_memcg() calls.
1692 * Please use obj_cgroup_get() to get a reference if the pointer
1693 * needs to be used outside of the local scope.
1694 */
1695struct obj_cgroup *current_obj_cgroup(void);
1696struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
1697
1698static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
1699{
1700 struct obj_cgroup *objcg = current_obj_cgroup();
1701
1702 if (objcg)
1703 obj_cgroup_get(objcg);
1704
1705 return objcg;
1706}
1707
1708int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
1709void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
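
/*
 * Illustrative sketch (not part of this header): byte-sized accounting for a
 * hypothetical non-slab consumer. The objcg reference is kept with the
 * object so the same objcg can be uncharged later, even if the origin memcg
 * has been reparented in the meantime.
 *
 *	struct my_obj {
 *		struct obj_cgroup *objcg;
 *		size_t size;
 *	};
 *
 *	static int my_obj_charge(struct my_obj *obj)
 *	{
 *		struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *		if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, obj->size)) {
 *			obj_cgroup_put(objcg);
 *			return -ENOMEM;
 *		}
 *		obj->objcg = objcg;
 *		return 0;
 *	}
 *
 *	static void my_obj_uncharge(struct my_obj *obj)
 *	{
 *		if (obj->objcg) {
 *			obj_cgroup_uncharge(obj->objcg, obj->size);
 *			obj_cgroup_put(obj->objcg);
 *		}
 *	}
 */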
1710
1711extern struct static_key_false memcg_bpf_enabled_key;
1712static inline bool memcg_bpf_enabled(void)
1713{
1714 return static_branch_likely(&memcg_bpf_enabled_key);
1715}
1716
1717extern struct static_key_false memcg_kmem_online_key;
1718
1719static inline bool memcg_kmem_online(void)
1720{
1721 return static_branch_likely(&memcg_kmem_online_key);
1722}
1723
1724static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1725 int order)
1726{
1727 if (memcg_kmem_online())
1728 return __memcg_kmem_charge_page(page, gfp, order);
1729 return 0;
1730}
1731
1732static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1733{
1734 if (memcg_kmem_online())
1735 __memcg_kmem_uncharge_page(page, order);
1736}
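
/*
 * Illustrative sketch (not part of this header): these helpers are invoked
 * by the page allocator for __GFP_ACCOUNT allocations, so most callers just
 * add the flag and let the allocator charge and uncharge the pages.
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 *	...
 *	__free_pages(page, order);	// uncharged in the free path
 */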
1737
1738/*
1739 * A helper for accessing a memcg's kmemcg_id, used for getting the
1740 * corresponding LRU lists.
1741 */
1742static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1743{
1744 return memcg ? memcg->kmemcg_id : -1;
1745}
1746
1747struct mem_cgroup *mem_cgroup_from_virt(void *p);
1748
1749static inline void count_objcg_events(struct obj_cgroup *objcg,
1750 enum vm_event_item idx,
1751 unsigned long count)
1752{
1753 struct mem_cgroup *memcg;
1754
1755 if (!memcg_kmem_online())
1756 return;
1757
1758 rcu_read_lock();
1759 memcg = obj_cgroup_memcg(objcg);
1760 count_memcg_events(memcg, idx, count);
1761 rcu_read_unlock();
1762}
1763
1764void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg, nodemask_t *mask);
1765
1766void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);
1767
1768static inline bool memcg_is_dying(struct mem_cgroup *memcg)
1769{
1770 return memcg ? css_is_dying(&memcg->css) : false;
1771}
1772
1773#else
1774static inline bool mem_cgroup_kmem_disabled(void)
1775{
1776 return true;
1777}
1778
1779static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1780 int order)
1781{
1782 return 0;
1783}
1784
1785static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1786{
1787}
1788
1789static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1790 int order)
1791{
1792 return 0;
1793}
1794
1795static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
1796{
1797}
1798
1799static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
1800{
1801 return NULL;
1802}
1803
1804static inline bool memcg_bpf_enabled(void)
1805{
1806 return false;
1807}
1808
1809static inline bool memcg_kmem_online(void)
1810{
1811 return false;
1812}
1813
1814static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1815{
1816 return -1;
1817}
1818
1819static inline struct mem_cgroup *mem_cgroup_from_virt(void *p)
1820{
1821 return NULL;
1822}
1823
1824static inline void count_objcg_events(struct obj_cgroup *objcg,
1825 enum vm_event_item idx,
1826 unsigned long count)
1827{
1828}
1829
1830static inline ino_t page_cgroup_ino(struct page *page)
1831{
1832 return 0;
1833}
1834
1835static inline void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg,
1836 nodemask_t *mask)
1837{
1838}
1839
1840static inline void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
1841{
1842}
1843
1844static inline bool memcg_is_dying(struct mem_cgroup *memcg)
1845{
1846 return false;
1847}
1848#endif /* CONFIG_MEMCG */
1849
1850#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
1851bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
1852void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
1853void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
1854bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
1855#else
1856static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
1857{
1858 return true;
1859}
1860static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
1861 size_t size)
1862{
1863}
1864static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
1865 size_t size)
1866{
1867}
1868static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
1869{
1870 /* if zswap is disabled, do not block pages going to the swapping device */
1871 return true;
1872}
1873#endif
1874
1875
1876/* Cgroup v1-related declarations */
1877
1878#ifdef CONFIG_MEMCG_V1
1879unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
1880 gfp_t gfp_mask,
1881 unsigned long *total_scanned);
1882
1883bool mem_cgroup_oom_synchronize(bool wait);
1884
1885static inline bool task_in_memcg_oom(struct task_struct *p)
1886{
1887 return p->memcg_in_oom;
1888}
1889
1890static inline void mem_cgroup_enter_user_fault(void)
1891{
1892 WARN_ON(current->in_user_fault);
1893 current->in_user_fault = 1;
1894}
1895
1896static inline void mem_cgroup_exit_user_fault(void)
1897{
1898 WARN_ON(!current->in_user_fault);
1899 current->in_user_fault = 0;
1900}
1901
1902void memcg1_swapout(struct folio *folio, swp_entry_t entry);
1903void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);
1904
1905#else /* CONFIG_MEMCG_V1 */
1906static inline
1907unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
1908 gfp_t gfp_mask,
1909 unsigned long *total_scanned)
1910{
1911 return 0;
1912}
1913
1914static inline bool task_in_memcg_oom(struct task_struct *p)
1915{
1916 return false;
1917}
1918
1919static inline bool mem_cgroup_oom_synchronize(bool wait)
1920{
1921 return false;
1922}
1923
1924static inline void mem_cgroup_enter_user_fault(void)
1925{
1926}
1927
1928static inline void mem_cgroup_exit_user_fault(void)
1929{
1930}
1931
1932static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
1933{
1934}
1935
1936static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
1937{
1938}
1939
1940#endif /* CONFIG_MEMCG_V1 */
1941
1942#endif /* _LINUX_MEMCONTROL_H */