Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
memcontrol.h at commit 74cd4e0e5399480e3fab2cd6a6cbdb17f673c335 (1939 lines, 48 kB)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
#include <linux/shrinker.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

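/*
 * Editor's note: an illustrative sketch, not part of this header. Because
 * memcg_stat_item indices start at NR_VM_NODE_STAT_ITEMS, the stat
 * interfaces declared later in this file accept both node-wide and
 * memcg-specific indices; assuming a valid, reference-held memcg:
 *
 *	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);	// memcg-only item
 *	nr = memcg_page_state(memcg, NR_FILE_PAGES);	// shared node item
 */
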
enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_SOCK_THROTTLED,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16

struct mem_cgroup_private_id {
	int id;
	refcount_t ref;
};

struct memcg_vmstats_percpu;
struct memcg1_events_percpu;
struct memcg_vmstats;
struct lruvec_stats_percpu;
struct lruvec_stats;

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	atomic_t generation;
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	/* Keep the read-only fields at the start */
	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of */

	struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
	struct lruvec_stats *lruvec_stats;
	struct shrinker_info __rcu *shrinker_info;

#ifdef CONFIG_MEMCG_V1
	/*
	 * Memcg-v1-only fields sit in the middle, as a buffer between the
	 * read-mostly fields above and the frequently updated fields below,
	 * to avoid false sharing. If the v1 fields are not present, explicit
	 * padding is needed instead.
	 */

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
#else
	CACHELINE_PADDING(_pad1_);
#endif

	/* Fields which get updated often at the end. */
	struct lruvec lruvec;
	CACHELINE_PADDING(_pad2_);
	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
	struct mem_cgroup_reclaim_iter iter;

#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
	/* slab stats for nmi context */
	atomic_t slab_reclaimable;
	atomic_t slab_unreclaimable;
#endif
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* Index of the threshold just below or equal to the current usage */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[] __counted_by(size);
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;	/* protected by objcg_lock */
		struct rcu_head rcu;
	};
};

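/*
 * Editor's note: an illustrative sketch, not part of this header. A typical
 * byte-sized charge against the current task's objcg, using the helpers
 * declared further down in this file:
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size))
 *		...	// charge failed, back out
 *	...
 *	obj_cgroup_uncharge(objcg, size);
 *	obj_cgroup_put(objcg);
 *
 * Because the objcg (not the memcg) is recorded against the object, the
 * whole bucket can later be reparented without touching each live object.
 */
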
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_private_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* registered local peak watchers */
	struct list_head memory_peaks;
	struct list_head swap_peaks;
	spinlock_t peaks_lock;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#ifdef CONFIG_ZSWAP
	unsigned long zswap_max;

	/*
	 * Prevent pages from this memcg from being written back from zswap to
	 * swap, and from being swapped out on zswap store failures.
	 */
	bool zswap_writeback;
#endif

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup,
	 * rather than just one?
	 */
	bool oom_group;

	int swappiness;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* memory.stat */
	struct memcg_vmstats *vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
	/* MEMCG_KMEM for nmi context */
	atomic_t kmem_stat;
#endif
	/*
	 * Hint of reclaim pressure for socket memory management. Note
	 * that this indicator should NOT be used in legacy cgroup mode
	 * where socket memory is accounted/charged separately.
	 */
	u64 socket_pressure;
#if BITS_PER_LONG < 64
	seqlock_t socket_pressure_seqlock;
#endif
	int kmemcg_id;
	/*
	 * memcg->objcg is wiped out as a part of the objcg reparenting
	 * process. memcg->orig_objcg preserves a pointer (and a reference)
	 * to the original objcg until the end of the memcg's life.
	 */
	struct obj_cgroup __rcu *objcg;
	struct obj_cgroup *orig_objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN_WALKS_MMU
	/* per-memcg mm_struct list */
	struct lru_gen_mm_list mm_list;
#endif

#ifdef CONFIG_MEMCG_V1
	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	struct memcg1_events_percpu __percpu *events_percpu;

	unsigned long soft_limit;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	/* OOM-Killer disable */
	int oom_kill_disable;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;
#endif /* CONFIG_MEMCG_V1 */

	struct mem_cgroup_per_node *nodeinfo[];
};

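/*
 * Editor's note: an illustrative sketch, not part of this header. The
 * memory_events[] counters above back the "memory.events" cgroup file and
 * are bumped through memcg_memory_event() (declared later in this file);
 * e.g. a charge path that hit the hard limit might report:
 *
 *	memcg_memory_event(memcg, MEMCG_MAX);
 *
 * while memory_events_local[] feeds "memory.events.local", which counts
 * only events that happened in this cgroup itself rather than anywhere
 * in its subtree.
 */
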
/*
 * size of first charge trial.
 * TODO: it may be necessary to use bigger numbers on big-iron machines, or
 * to size this dynamically based on the workload.
 */
#define MEMCG_CHARGE_BATCH	64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to a slabobj_ext vector */
	MEMCG_DATA_OBJEXTS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define __OBJEXTS_ALLOC_FAIL	MEMCG_DATA_OBJEXTS
#define __FIRST_OBJEXT_FLAG	__NR_MEMCG_DATA_FLAGS

#else /* CONFIG_MEMCG */

#define __OBJEXTS_ALLOC_FAIL	(1UL << 0)
#define __FIRST_OBJEXT_FLAG	(1UL << 0)

#endif /* CONFIG_MEMCG */

enum objext_flags {
	/*
	 * Use bit 0 with zero other bits to signal that the slabobj_ext
	 * vector failed to allocate. The same bit 0 with valid upper bits
	 * means MEMCG_DATA_OBJEXTS.
	 */
	OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL,
	__OBJEXTS_FLAG_UNUSED = __FIRST_OBJEXT_FLAG,
	/* the next bit after the last actual flag */
	__NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
};

#define OBJEXTS_FLAGS_MASK	(__NR_OBJEXTS_FLAGS - 1)

#ifdef CONFIG_MEMCG

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect the
 * memcg associated with the kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

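/*
 * Editor's note: an illustrative sketch, not part of this header. One way
 * to satisfy the stability rules above is to hold the folio lock across
 * the lookup and whatever depends on it:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);	// stable while the lock is held
 *	...
 *	folio_unlock(folio);
 *
 * For kmem folios, an rcu_read_lock()/rcu_read_unlock() pair around the
 * call keeps the returned memcg from being released instead.
 */
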
/*
 * folio_memcg_charged - Check if a folio is charged to a memory cgroup.
 * @folio: Pointer to the folio.
 *
 * Returns true if folio is charged to a memory cgroup, otherwise returns false.
 */
static inline bool folio_memcg_charged(struct folio *folio)
{
	return folio->memcg_data != 0;
}

/*
 * folio_memcg_check - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. Unlike folio_memcg(), this function can take any folio
 * as an argument. It has to be used in cases when it's not known if a folio
 * has an associated memory cgroup pointer, a slabobj_ext vector or
 * an object cgroup.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect the
 * memcg associated with the kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	/*
	 * Because folio->memcg_data might be changed asynchronously
	 * for slabs, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJEXTS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	if (PageTail(page))
		return NULL;
	return folio_memcg_check((struct folio *)page);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}

/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low,
					 unsigned long *usage)
{
	*min = *low = *usage = 0;

	if (mem_cgroup_disabled())
		return;

	*usage = page_counter_read(&memcg->memory);
	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special-casing this specific case here because
	 * mem_cgroup_calculate_protection is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example:
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim:
	 *  A.elow = A.low
	 *  B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 *  C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A's reclaim:
	 *  A.elow = 0
	 *  B.elow = B.low
	 *  C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim, then
	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
	 * is possible and reclaiming B would violate the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection. The target memcg's protection is ignored, see
	 * mem_cgroup_calculate_protection() and mem_cgroup_protection().
	 */
	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
	       memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
	       page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
	       page_counter_read(&memcg->memory);
}

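/*
 * Editor's note: an illustrative sketch, not part of this header. The
 * reclaim path consumes the predicates above roughly like the per-memcg
 * loop in mm/vmscan.c does; a simplified rendering (sc is the reclaim
 * scan_control):
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *
 *	if (mem_cgroup_below_min(target_memcg, memcg)) {
 *		continue;	// hard protection, skip entirely
 *	} else if (mem_cgroup_below_low(target_memcg, memcg)) {
 *		if (!sc->memcg_low_reclaim) {
 *			sc->memcg_low_skipped = 1;
 *			continue;	// soft protection, skip for now
 *		}
 *		memcg_memory_event(memcg, MEMCG_LOW);
 *	}
 */
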
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp);

int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
				   gfp_t gfp, swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_folios(folios);
}

void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
void mem_cgroup_migrate(struct folio *old, struct folio *new);

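/*
 * Editor's note: an illustrative sketch, not part of this header. The
 * canonical caller pattern charges a freshly allocated folio before it
 * becomes visible, and backs out on failure:
 *
 *	folio = filemap_alloc_folio(gfp, order);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, current->mm, gfp)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	...	// insert folio into the page cache, map it, etc.
 *
 * The eventual freeing of a charged folio takes care of the uncharge.
 */
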
/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_current(void);

struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

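/*
 * Editor's note: an illustrative sketch, not part of this header. The lock
 * helpers above return the folio's lruvec with its lru_lock held, so a
 * typical isolation sequence (assuming a folio reference is held) is:
 *
 *	struct lruvec *lruvec;
 *
 *	lruvec = folio_lruvec_lock_irq(folio);
 *	lruvec_del_folio(lruvec, folio);	// take it off its LRU list
 *	unlock_page_lruvec_irq(lruvec);
 *
 * unlock_page_lruvec_irq() is defined further down in this file;
 * lruvec_del_folio() lives in linux/mm_inline.h.
 */
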
#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	if (objcg)
		percpu_ref_put(&objcg->refcnt);
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return !memcg || css_tryget(&memcg->css);
}

static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
{
	return !memcg || css_tryget_online(&memcg->css);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*)(struct task_struct *, void *), void *arg);

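/*
 * Editor's note: an illustrative sketch, not part of this header.
 * mem_cgroup_iter() walks the hierarchy under @root in pre-order; the
 * usual full-subtree walk looks like:
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL))
 *		...	// breaking out requires mem_cgroup_iter_break(root, iter)
 *
 * Passing a reclaim cookie instead of NULL makes the walk resumable and
 * shareable between concurrent reclaimers.
 */
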
static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id);

static inline u64 mem_cgroup_id(struct mem_cgroup *memcg)
{
	return memcg ? cgroup_id(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_id(u64 id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return css_is_online(&memcg->css);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void __mem_cgroup_handle_over_high(gfp_t gfp_mask);

static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
	if (unlikely(current->memcg_nr_pages_over_high))
		__mem_cgroup_handle_over_high(gfp_mask);
}

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

/* idx can be of type enum memcg_stat_item or node_stat_item */
void mod_memcg_state(struct mem_cgroup *memcg,
		     enum memcg_stat_item idx, int val);

static inline void mod_memcg_page_state(struct page *page,
					enum memcg_stat_item idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = folio_memcg(page_folio(page));
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

unsigned long memcg_events(struct mem_cgroup *memcg, int event);
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
bool memcg_stat_item_valid(int idx);
bool memcg_vm_event_item_valid(enum vm_event_item idx);
unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
unsigned long lruvec_page_state_local(struct lruvec *lruvec,
				      enum node_stat_item idx);

void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);

void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			unsigned long count);

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_events_mm(struct mm_struct *mm,
					enum vm_event_item idx, unsigned long count)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, count);
	rcu_read_unlock();
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	count_memcg_events_mm(mm, idx, 1);
}

void __memcg_memory_event(struct mem_cgroup *memcg,
			  enum memcg_memory_event event, bool allow_spinning);

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	__memcg_memory_event(memcg, event, true);
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *first, unsigned order);
void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
			    unsigned new_order);

static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;
	u64 id;

	if (mem_cgroup_disabled())
		return 0;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (!memcg)
		memcg = root_mem_cgroup;
	id = cgroup_id(memcg->css.cgroup);
	rcu_read_unlock();
	return id;
}

void mem_cgroup_flush_workqueue(void);

extern int mem_cgroup_init(void);
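
/*
 * Editor's note: an illustrative sketch, not part of this header. The
 * _mm event helpers above resolve the memcg from an mm under RCU, so a
 * fault path can attribute a per-memcg event with one call, e.g.:
 *
 *	count_memcg_event_mm(vma->vm_mm, PGFAULT);
 *
 * which bumps the PGFAULT count in the memcg that currently owns the mm.
 */
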
#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0

#define root_mem_cgroup (NULL)

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline bool folio_memcg_charged(struct folio *folio)
{
	return false;
}

static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low,
					 unsigned long *usage)
{
	*min = *low = *usage = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	return true;
}
static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
		struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
}

static inline void mem_cgroup_replace_folio(struct folio *old,
					    struct folio *new)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
}

static inline unsigned short mem_cgroup_private_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline u64 mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_id(u64 id)
{
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
		struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   enum memcg_stat_item idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					enum memcg_stat_item idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
{
	return 0;
}

static inline bool memcg_stat_item_valid(int idx)
{
	return false;
}

static inline bool memcg_vm_event_item_valid(enum vm_event_item idx)
{
	return false;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
}

static inline void count_memcg_events_mm(struct mm_struct *mm,
					enum vm_event_item idx, unsigned long count)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *first, unsigned order)
{
}

static inline void folio_split_memcg_refs(struct folio *folio,
		unsigned old_order, unsigned new_order)
{
}

static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
{
	return 0;
}

static inline void mem_cgroup_flush_workqueue(void) { }

static inline int mem_cgroup_init(void) { return 0; }
#endif /* CONFIG_MEMCG */

/*
 * Extended information for slab objects stored as an array in page->memcg_data
 * if MEMCG_DATA_OBJEXTS is set.
 */
struct slabobj_ext {
#ifdef CONFIG_MEMCG
	struct obj_cgroup *objcg;
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	union codetag_ref ref;
#endif
} __aligned(8);

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable folio->memcg binding, see folio_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline void folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec **lruvecp, unsigned long *flags)
{
	if (*lruvecp) {
		if (folio_matches_lruvec(folio, *lruvecp))
			return;

		unlock_page_lruvec_irqrestore(*lruvecp, *flags);
	}

	*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
}

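/*
 * Editor's note: an illustrative sketch, not part of this header. The
 * relock helpers let batch operations over many folios keep one lruvec
 * lock across consecutive folios from the same memcg/node; a simplified
 * rendering of the pattern used by the LRU batching code:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	int i;
 *
 *	for (i = 0; i < folio_batch_count(fbatch); i++) {
 *		struct folio *folio = fbatch->folios[i];
 *
 *		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 *		...	// operate on the folio's LRU list
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */
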
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);
	if (unlikely(memcg && &memcg->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)

void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk);
bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
			  gfp_t gfp_mask);
void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages);

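/*
 * Editor's note: an illustrative sketch, not part of this header. The
 * networking layer guards these calls behind the static branch, roughly:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_sk_charge(sk, nr_pages, gfp))
 *		...	// treat as memory pressure / fail the allocation
 *
 * so the whole mechanism costs a single patched-out branch when no
 * cgroup has socket accounting enabled.
 */
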
#if BITS_PER_LONG < 64
static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
{
	u64 val = get_jiffies_64() + HZ;
	unsigned long flags;

	write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags);
	memcg->socket_pressure = val;
	write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags);
}

static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqbegin(&memcg->socket_pressure_seqlock);
		val = memcg->socket_pressure;
	} while (read_seqretry(&memcg->socket_pressure_seqlock, seq));

	return val;
}
#else
static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
{
	WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
}

static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
{
	return READ_ONCE(memcg->socket_pressure);
}
#endif

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);

static inline int shrinker_id(struct shrinker *shrinker)
{
	return shrinker->id;
}
#else
#define mem_cgroup_sockets_enabled 0

static inline void mem_cgroup_sk_alloc(struct sock *sk)
{
}

static inline void mem_cgroup_sk_free(struct sock *sk)
{
}

static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
{
}

static inline bool mem_cgroup_sk_charge(const struct sock *sk,
					unsigned int nr_pages,
					gfp_t gfp_mask)
{
	return false;
}

static inline void mem_cgroup_sk_uncharge(const struct sock *sk,
					  unsigned int nr_pages)
{
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}

static inline int shrinker_id(struct shrinker *shrinker)
{
	return -1;
}
#endif

#ifdef CONFIG_MEMCG
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

/*
 * The returned objcg pointer is safe to use without additional
 * protection within a scope. The scope is defined either by
 * the current task (similar to the "current" global variable)
 * or by a set_active_memcg() pair.
 * Please use obj_cgroup_get() to get a reference if the pointer
 * needs to be used outside of the local scope.
 */
struct obj_cgroup *current_obj_cgroup(void);
struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);

static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
{
	struct obj_cgroup *objcg = current_obj_cgroup();

	if (objcg)
		obj_cgroup_get(objcg);

	return objcg;
}

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_bpf_enabled_key;
static inline bool memcg_bpf_enabled(void)
{
	return static_branch_likely(&memcg_bpf_enabled_key);
}

extern struct static_key_false memcg_kmem_online_key;

static inline bool memcg_kmem_online(void)
{
	return static_branch_likely(&memcg_kmem_online_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_online())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_online())
		__memcg_kmem_uncharge_page(page, order);
}

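/*
 * Editor's note: an illustrative sketch, not part of this header. Callers
 * normally don't invoke memcg_kmem_charge_page() directly; the page
 * allocator does it for allocations that opt into kernel-memory
 * accounting via the gfp mask:
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 *
 * which charges the current task's memcg and marks the page with
 * MEMCG_DATA_KMEM, so a later free uncharges it automatically.
 */
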
/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_virt(void *p);

static inline void count_objcg_events(struct obj_cgroup *objcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	struct mem_cgroup *memcg;

	if (!memcg_kmem_online())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, count);
	rcu_read_unlock();
}

void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg, nodemask_t *mask);

void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);

static inline bool memcg_is_dying(struct mem_cgroup *memcg)
{
	return memcg ? css_is_dying(&memcg->css) : false;
}

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
	return NULL;
}

static inline bool memcg_bpf_enabled(void)
{
	return false;
}

static inline bool memcg_kmem_online(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_virt(void *p)
{
	return NULL;
}

static inline void count_objcg_events(struct obj_cgroup *objcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline ino_t page_cgroup_ino(struct page *page)
{
	return 0;
}

static inline void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg,
						  nodemask_t *mask)
{
}

static inline void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
{
}

static inline bool memcg_is_dying(struct mem_cgroup *memcg)
{
	return false;
}
#endif /* CONFIG_MEMCG */

#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
					   size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
					     size_t size)
{
}
static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
{
	/* if zswap is disabled, do not block pages going to the swapping device */
	return true;
}
#endif

/* Cgroup v1-related declarations */

#ifdef CONFIG_MEMCG_V1
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
					gfp_t gfp_mask,
					unsigned long *total_scanned);

bool mem_cgroup_oom_synchronize(bool wait);

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

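/*
 * Editor's note: an illustrative sketch, not part of this header. These
 * hooks bracket user-space page faults so a v1 memcg OOM can be handled
 * after the fault unwinds; the fault path uses them roughly as:
 *
 *	mem_cgroup_enter_user_fault();
 *	ret = handle_the_fault(...);	// hypothetical stand-in
 *	mem_cgroup_exit_user_fault();
 *
 *	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *		mem_cgroup_oom_synchronize(false);
 *
 * i.e. the task waits for (or performs) the memcg OOM kill only once it
 * is back in a context where that is safe to do.
 */
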
void memcg1_swapout(struct folio *folio, swp_entry_t entry);
void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);

#else /* CONFIG_MEMCG_V1 */
static inline
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
					gfp_t gfp_mask,
					unsigned long *total_scanned)
{
	return 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
{
}

#endif /* CONFIG_MEMCG_V1 */

#endif /* _LINUX_MEMCONTROL_H */