Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/mmzone.h at 1d51b370a0f8f642f4fc84c795fbedac0fcdbbd2 (2392 lines, 76 kB)
1/* SPDX-License-Identifier: GPL-2.0 */ 2#ifndef _LINUX_MMZONE_H 3#define _LINUX_MMZONE_H 4 5#ifndef __ASSEMBLY__ 6#ifndef __GENERATING_BOUNDS_H 7 8#include <linux/spinlock.h> 9#include <linux/list.h> 10#include <linux/list_nulls.h> 11#include <linux/wait.h> 12#include <linux/bitops.h> 13#include <linux/cache.h> 14#include <linux/threads.h> 15#include <linux/numa.h> 16#include <linux/init.h> 17#include <linux/seqlock.h> 18#include <linux/nodemask.h> 19#include <linux/pageblock-flags.h> 20#include <linux/page-flags-layout.h> 21#include <linux/atomic.h> 22#include <linux/mm_types.h> 23#include <linux/page-flags.h> 24#include <linux/local_lock.h> 25#include <linux/zswap.h> 26#include <linux/sizes.h> 27#include <asm/page.h> 28 29/* Free memory management - zoned buddy allocator. */ 30#ifndef CONFIG_ARCH_FORCE_MAX_ORDER 31#define MAX_PAGE_ORDER 10 32#else 33#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER 34#endif 35#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER) 36 37#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES) 38 39#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1) 40 41/* Defines the order for the number of pages that have a migrate type. */ 42#ifndef CONFIG_PAGE_BLOCK_MAX_ORDER 43#define PAGE_BLOCK_MAX_ORDER MAX_PAGE_ORDER 44#else 45#define PAGE_BLOCK_MAX_ORDER CONFIG_PAGE_BLOCK_MAX_ORDER 46#endif /* CONFIG_PAGE_BLOCK_MAX_ORDER */ 47 48/* 49 * The MAX_PAGE_ORDER, which defines the max order of pages to be allocated 50 * by the buddy allocator, has to be larger or equal to the PAGE_BLOCK_MAX_ORDER, 51 * which defines the order for the number of pages that can have a migrate type 52 */ 53#if (PAGE_BLOCK_MAX_ORDER > MAX_PAGE_ORDER) 54#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_MAX_ORDER 55#endif 56 57/* 58 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed 59 * costly to service. That is between allocation orders which should 60 * coalesce naturally under reasonable reclaim pressure and those which 61 * will not. 62 */ 63#define PAGE_ALLOC_COSTLY_ORDER 3 64 65#if !defined(CONFIG_HAVE_GIGANTIC_FOLIOS) 66/* 67 * We don't expect any folios that exceed buddy sizes (and consequently 68 * memory sections). 69 */ 70#define MAX_FOLIO_ORDER MAX_PAGE_ORDER 71#elif defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) 72/* 73 * Only pages within a single memory section are guaranteed to be 74 * contiguous. By limiting folios to a single memory section, all folio 75 * pages are guaranteed to be contiguous. 76 */ 77#define MAX_FOLIO_ORDER PFN_SECTION_SHIFT 78#elif defined(CONFIG_HUGETLB_PAGE) 79/* 80 * There is no real limit on the folio size. We limit them to the maximum we 81 * currently expect (see CONFIG_HAVE_GIGANTIC_FOLIOS): with hugetlb, we expect 82 * no folios larger than 16 GiB on 64bit and 1 GiB on 32bit. 83 */ 84#ifdef CONFIG_64BIT 85#define MAX_FOLIO_ORDER (ilog2(SZ_16G) - PAGE_SHIFT) 86#else 87#define MAX_FOLIO_ORDER (ilog2(SZ_1G) - PAGE_SHIFT) 88#endif 89#else 90/* 91 * Without hugetlb, gigantic folios that are bigger than a single PUD are 92 * currently impossible. 93 */ 94#define MAX_FOLIO_ORDER (PUD_SHIFT - PAGE_SHIFT) 95#endif 96 97#define MAX_FOLIO_NR_PAGES (1UL << MAX_FOLIO_ORDER) 98 99/* 100 * HugeTLB Vmemmap Optimization (HVO) requires struct pages of the head page to 101 * be naturally aligned with regard to the folio size. 102 * 103 * HVO which is only active if the size of struct page is a power of 2. 
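 *
 * Illustrative arithmetic (assumed values, not guarantees from this header):
 * with 4 KiB pages, a 64-byte struct page and MAX_FOLIO_ORDER == 18 (1 GiB
 * folios), MAX_FOLIO_VMEMMAP_ALIGN below works out to (1UL << 18) * 64,
 * i.e. 16 MiB.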
104 */ 105#define MAX_FOLIO_VMEMMAP_ALIGN \ 106 (IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) && \ 107 is_power_of_2(sizeof(struct page)) ? \ 108 MAX_FOLIO_NR_PAGES * sizeof(struct page) : 0) 109 110/* 111 * vmemmap optimization (like HVO) is only possible for page orders that fill 112 * two or more pages with struct pages. 113 */ 114#define VMEMMAP_TAIL_MIN_ORDER (ilog2(2 * PAGE_SIZE / sizeof(struct page))) 115#define __NR_VMEMMAP_TAILS (MAX_FOLIO_ORDER - VMEMMAP_TAIL_MIN_ORDER + 1) 116#define NR_VMEMMAP_TAILS (__NR_VMEMMAP_TAILS > 0 ? __NR_VMEMMAP_TAILS : 0) 117 118enum migratetype { 119 MIGRATE_UNMOVABLE, 120 MIGRATE_MOVABLE, 121 MIGRATE_RECLAIMABLE, 122 MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ 123 MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, 124#ifdef CONFIG_CMA 125 /* 126 * MIGRATE_CMA migration type is designed to mimic the way 127 * ZONE_MOVABLE works. Only movable pages can be allocated 128 * from MIGRATE_CMA pageblocks and page allocator never 129 * implicitly change migration type of MIGRATE_CMA pageblock. 130 * 131 * The way to use it is to change migratetype of a range of 132 * pageblocks to MIGRATE_CMA which can be done by 133 * __free_pageblock_cma() function. 134 */ 135 MIGRATE_CMA, 136 __MIGRATE_TYPE_END = MIGRATE_CMA, 137#else 138 __MIGRATE_TYPE_END = MIGRATE_HIGHATOMIC, 139#endif 140#ifdef CONFIG_MEMORY_ISOLATION 141 MIGRATE_ISOLATE, /* can't allocate from here */ 142#endif 143 MIGRATE_TYPES 144}; 145 146/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ 147extern const char * const migratetype_names[MIGRATE_TYPES]; 148 149#ifdef CONFIG_CMA 150# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) 151# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) 152/* 153 * __dump_folio() in mm/debug.c passes a folio pointer to on-stack struct folio, 154 * so folio_pfn() cannot be used and pfn is needed. 155 */ 156# define is_migrate_cma_folio(folio, pfn) \ 157 (get_pfnblock_migratetype(&folio->page, pfn) == MIGRATE_CMA) 158#else 159# define is_migrate_cma(migratetype) false 160# define is_migrate_cma_page(_page) false 161# define is_migrate_cma_folio(folio, pfn) false 162#endif 163 164static inline bool is_migrate_movable(int mt) 165{ 166 return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE; 167} 168 169/* 170 * Check whether a migratetype can be merged with another migratetype. 171 * 172 * It is only mergeable when it can fall back to other migratetypes for 173 * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c. 
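 *
 * Concretely (following the enum above): MIGRATE_UNMOVABLE, MIGRATE_MOVABLE
 * and MIGRATE_RECLAIMABLE compare below MIGRATE_PCPTYPES and are mergeable,
 * while MIGRATE_HIGHATOMIC, MIGRATE_CMA and MIGRATE_ISOLATE are not.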
174 */ 175static inline bool migratetype_is_mergeable(int mt) 176{ 177 return mt < MIGRATE_PCPTYPES; 178} 179 180#define for_each_migratetype_order(order, type) \ 181 for (order = 0; order < NR_PAGE_ORDERS; order++) \ 182 for (type = 0; type < MIGRATE_TYPES; type++) 183 184extern int page_group_by_mobility_disabled; 185 186#define get_pageblock_migratetype(page) \ 187 get_pfnblock_migratetype(page, page_to_pfn(page)) 188 189#define folio_migratetype(folio) \ 190 get_pageblock_migratetype(&folio->page) 191 192struct free_area { 193 struct list_head free_list[MIGRATE_TYPES]; 194 unsigned long nr_free; 195}; 196 197struct pglist_data; 198 199#ifdef CONFIG_NUMA 200enum numa_stat_item { 201 NUMA_HIT, /* allocated in intended node */ 202 NUMA_MISS, /* allocated in non intended node */ 203 NUMA_FOREIGN, /* was intended here, hit elsewhere */ 204 NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ 205 NUMA_LOCAL, /* allocation from local node */ 206 NUMA_OTHER, /* allocation from other node */ 207 NR_VM_NUMA_EVENT_ITEMS 208}; 209#else 210#define NR_VM_NUMA_EVENT_ITEMS 0 211#endif 212 213enum zone_stat_item { 214 /* First 128 byte cacheline (assuming 64 bit words) */ 215 NR_FREE_PAGES, 216 NR_FREE_PAGES_BLOCKS, 217 NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */ 218 NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE, 219 NR_ZONE_ACTIVE_ANON, 220 NR_ZONE_INACTIVE_FILE, 221 NR_ZONE_ACTIVE_FILE, 222 NR_ZONE_UNEVICTABLE, 223 NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ 224 NR_MLOCK, /* mlock()ed pages found and moved off LRU */ 225 /* Second 128 byte cacheline */ 226#if IS_ENABLED(CONFIG_ZSMALLOC) 227 NR_ZSPAGES, /* allocated in zsmalloc */ 228#endif 229 NR_FREE_CMA_PAGES, 230#ifdef CONFIG_UNACCEPTED_MEMORY 231 NR_UNACCEPTED, 232#endif 233 NR_VM_ZONE_STAT_ITEMS }; 234 235enum node_stat_item { 236 NR_LRU_BASE, 237 NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ 238 NR_ACTIVE_ANON, /* " " " " " */ 239 NR_INACTIVE_FILE, /* " " " " " */ 240 NR_ACTIVE_FILE, /* " " " " " */ 241 NR_UNEVICTABLE, /* " " " " " */ 242 NR_SLAB_RECLAIMABLE_B, 243 NR_SLAB_UNRECLAIMABLE_B, 244 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ 245 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ 246 WORKINGSET_NODES, 247 WORKINGSET_REFAULT_BASE, 248 WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE, 249 WORKINGSET_REFAULT_FILE, 250 WORKINGSET_ACTIVATE_BASE, 251 WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE, 252 WORKINGSET_ACTIVATE_FILE, 253 WORKINGSET_RESTORE_BASE, 254 WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE, 255 WORKINGSET_RESTORE_FILE, 256 WORKINGSET_NODERECLAIM, 257 NR_ANON_MAPPED, /* Mapped anonymous pages */ 258 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. 
259 only modified from process context */ 260 NR_FILE_PAGES, 261 NR_FILE_DIRTY, 262 NR_WRITEBACK, 263 NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ 264 NR_SHMEM_THPS, 265 NR_SHMEM_PMDMAPPED, 266 NR_FILE_THPS, 267 NR_FILE_PMDMAPPED, 268 NR_ANON_THPS, 269 NR_VMSCAN_WRITE, 270 NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ 271 NR_DIRTIED, /* page dirtyings since bootup */ 272 NR_WRITTEN, /* page writings since bootup */ 273 NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */ 274 NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */ 275 NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */ 276 NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */ 277 NR_VMALLOC, 278 NR_KERNEL_STACK_KB, /* measured in KiB */ 279#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) 280 NR_KERNEL_SCS_KB, /* measured in KiB */ 281#endif 282 NR_PAGETABLE, /* used for pagetables */ 283 NR_SECONDARY_PAGETABLE, /* secondary pagetables, KVM & IOMMU */ 284#ifdef CONFIG_IOMMU_SUPPORT 285 NR_IOMMU_PAGES, /* # of pages allocated by IOMMU */ 286#endif 287#ifdef CONFIG_SWAP 288 NR_SWAPCACHE, 289#endif 290#ifdef CONFIG_NUMA_BALANCING 291 PGPROMOTE_SUCCESS, /* promote successfully */ 292 /** 293 * Candidate pages for promotion based on hint fault latency. This 294 * counter is used to control the promotion rate and adjust the hot 295 * threshold. 296 */ 297 PGPROMOTE_CANDIDATE, 298 /** 299 * Not rate-limited (NRL) candidate pages for those can be promoted 300 * without considering hot threshold because of enough free pages in 301 * fast-tier node. These promotions bypass the regular hotness checks 302 * and do NOT influence the promotion rate-limiter or 303 * threshold-adjustment logic. 304 * This is for statistics/monitoring purposes. 305 */ 306 PGPROMOTE_CANDIDATE_NRL, 307#endif 308 /* PGDEMOTE_*: pages demoted */ 309 PGDEMOTE_KSWAPD, 310 PGDEMOTE_DIRECT, 311 PGDEMOTE_KHUGEPAGED, 312 PGDEMOTE_PROACTIVE, 313 PGSTEAL_KSWAPD, 314 PGSTEAL_DIRECT, 315 PGSTEAL_KHUGEPAGED, 316 PGSTEAL_PROACTIVE, 317 PGSTEAL_ANON, 318 PGSTEAL_FILE, 319 PGSCAN_KSWAPD, 320 PGSCAN_DIRECT, 321 PGSCAN_KHUGEPAGED, 322 PGSCAN_PROACTIVE, 323 PGSCAN_ANON, 324 PGSCAN_FILE, 325 PGREFILL, 326#ifdef CONFIG_HUGETLB_PAGE 327 NR_HUGETLB, 328#endif 329 NR_BALLOON_PAGES, 330 NR_KERNEL_FILE_PAGES, 331 NR_GPU_ACTIVE, /* Pages assigned to GPU objects */ 332 NR_GPU_RECLAIM, /* Pages in shrinkable GPU pools */ 333 NR_VM_NODE_STAT_ITEMS 334}; 335 336/* 337 * Returns true if the item should be printed in THPs (/proc/vmstat 338 * currently prints number of anon, file and shmem THPs. But the item 339 * is charged in pages). 340 */ 341static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item) 342{ 343 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 344 return false; 345 346 return item == NR_ANON_THPS || 347 item == NR_FILE_THPS || 348 item == NR_SHMEM_THPS || 349 item == NR_SHMEM_PMDMAPPED || 350 item == NR_FILE_PMDMAPPED; 351} 352 353/* 354 * Returns true if the value is measured in bytes (most vmstat values are 355 * measured in pages). This defines the API part, the internal representation 356 * might be different. 357 */ 358static __always_inline bool vmstat_item_in_bytes(int idx) 359{ 360 /* 361 * Global and per-node slab counters track slab pages. 362 * It's expected that changes are multiples of PAGE_SIZE. 363 * Internally values are stored in pages. 364 * 365 * Per-memcg and per-lruvec counters track memory, consumed 366 * by individual slab objects. 
These counters are actually 367 * byte-precise. 368 */ 369 return (idx == NR_SLAB_RECLAIMABLE_B || 370 idx == NR_SLAB_UNRECLAIMABLE_B); 371} 372 373/* 374 * We do arithmetic on the LRU lists in various places in the code, 375 * so it is important to keep the active lists LRU_ACTIVE higher in 376 * the array than the corresponding inactive lists, and to keep 377 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. 378 * 379 * This has to be kept in sync with the statistics in zone_stat_item 380 * above and the descriptions in vmstat_text in mm/vmstat.c 381 */ 382#define LRU_BASE 0 383#define LRU_ACTIVE 1 384#define LRU_FILE 2 385 386enum lru_list { 387 LRU_INACTIVE_ANON = LRU_BASE, 388 LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, 389 LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, 390 LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, 391 LRU_UNEVICTABLE, 392 NR_LRU_LISTS 393}; 394 395enum vmscan_throttle_state { 396 VMSCAN_THROTTLE_WRITEBACK, 397 VMSCAN_THROTTLE_ISOLATED, 398 VMSCAN_THROTTLE_NOPROGRESS, 399 VMSCAN_THROTTLE_CONGESTED, 400 NR_VMSCAN_THROTTLE, 401}; 402 403#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) 404 405#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) 406 407static inline bool is_file_lru(enum lru_list lru) 408{ 409 return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); 410} 411 412static inline bool is_active_lru(enum lru_list lru) 413{ 414 return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); 415} 416 417#define WORKINGSET_ANON 0 418#define WORKINGSET_FILE 1 419#define ANON_AND_FILE 2 420 421enum lruvec_flags { 422 /* 423 * An lruvec has many dirty pages backed by a congested BDI: 424 * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim. 425 * It can be cleared by cgroup reclaim or kswapd. 426 * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim. 427 * It can only be cleared by kswapd. 428 * 429 * Essentially, kswapd can unthrottle an lruvec throttled by cgroup 430 * reclaim, but not vice versa. This only applies to the root cgroup. 431 * The goal is to prevent cgroup reclaim on the root cgroup (e.g. 432 * memory.reclaim) to unthrottle an unbalanced node (that was throttled 433 * by kswapd). 434 */ 435 LRUVEC_CGROUP_CONGESTED, 436 LRUVEC_NODE_CONGESTED, 437}; 438 439#endif /* !__GENERATING_BOUNDS_H */ 440 441/* 442 * Evictable folios are divided into multiple generations. The youngest and the 443 * oldest generation numbers, max_seq and min_seq, are monotonically increasing. 444 * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An 445 * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the 446 * corresponding generation. The gen counter in folio->flags stores gen+1 while 447 * a folio is on one of lrugen->folios[]. Otherwise it stores 0. 448 * 449 * After a folio is faulted in, the aging needs to check the accessed bit at 450 * least twice before handing this folio over to the eviction. The first check 451 * clears the accessed bit from the initial fault; the second check makes sure 452 * this folio hasn't been used since then. This process, AKA second chance, 453 * requires a minimum of two generations, hence MIN_NR_GENS. And to maintain ABI 454 * compatibility with the active/inactive LRU, e.g., /proc/vmstat, these two 455 * generations are considered active; the rest of generations, if they exist, 456 * are considered inactive. See lru_gen_is_active(). 
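 *
 * As an aside (a sketch, not defined in this header): the folios[] index for
 * a given sequence number is obtained by folding it into the window, i.e.
 * seq % MAX_NR_GENS; see lru_gen_from_seq() in include/linux/mm_inline.h.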
457 * 458 * PG_active is always cleared while a folio is on one of lrugen->folios[] so 459 * that the sliding window needs not to worry about it. And it's set again when 460 * a folio considered active is isolated for non-reclaiming purposes, e.g., 461 * migration. See lru_gen_add_folio() and lru_gen_del_folio(). 462 * 463 * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the 464 * number of categories of the active/inactive LRU when keeping track of 465 * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits 466 * in folio->flags, masked by LRU_GEN_MASK. 467 */ 468#define MIN_NR_GENS 2U 469#define MAX_NR_GENS 4U 470 471/* 472 * Each generation is divided into multiple tiers. A folio accessed N times 473 * through file descriptors is in tier order_base_2(N). A folio in the first 474 * tier (N=0,1) is marked by PG_referenced unless it was faulted in through page 475 * tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by 476 * PG_workingset. A folio in any other tier (1<N<5) between the first and last 477 * is marked by additional bits of LRU_REFS_WIDTH in folio->flags. 478 * 479 * In contrast to moving across generations which requires the LRU lock, moving 480 * across tiers only involves atomic operations on folio->flags and therefore 481 * has a negligible cost in the buffered access path. In the eviction path, 482 * comparisons of refaulted/(evicted+protected) from the first tier and the rest 483 * infer whether folios accessed multiple times through file descriptors are 484 * statistically hot and thus worth protecting. 485 * 486 * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the 487 * number of categories of the active/inactive LRU when keeping track of 488 * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in 489 * folio->flags, masked by LRU_REFS_MASK. 490 */ 491#define MAX_NR_TIERS 4U 492 493#ifndef __GENERATING_BOUNDS_H 494 495#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF) 496#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF) 497 498/* 499 * For folios accessed multiple times through file descriptors, 500 * lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags 501 * after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its 502 * bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily 503 * promoted into the second oldest generation in the eviction path. And when 504 * folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that 505 * lru_gen_inc_refs() can start over. Note that for this case, LRU_REFS_MASK is 506 * only valid when PG_referenced is set. 507 * 508 * For folios accessed multiple times through page tables, folio_update_gen() 509 * from a page table walk or lru_gen_set_refs() from a rmap walk sets 510 * PG_referenced after the accessed bit is cleared for the first time. 511 * Thereafter, those two paths set PG_workingset and promote folios to the 512 * youngest generation. Like folio_inc_gen(), folio_update_gen() also clears 513 * PG_referenced. Note that for this case, LRU_REFS_MASK is not used. 514 * 515 * For both cases above, after PG_workingset is set on a folio, it remains until 516 * this folio is either reclaimed, or "deactivated" by lru_gen_clear_refs(). It 517 * can be set again if lru_gen_test_recent() returns true upon a refault. 
518 */ 519#define LRU_REFS_FLAGS (LRU_REFS_MASK | BIT(PG_referenced)) 520 521struct lruvec; 522struct page_vma_mapped_walk; 523 524#ifdef CONFIG_LRU_GEN 525 526enum { 527 LRU_GEN_ANON, 528 LRU_GEN_FILE, 529}; 530 531enum { 532 LRU_GEN_CORE, 533 LRU_GEN_MM_WALK, 534 LRU_GEN_NONLEAF_YOUNG, 535 NR_LRU_GEN_CAPS 536}; 537 538#define MIN_LRU_BATCH BITS_PER_LONG 539#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64) 540 541/* whether to keep historical stats from evicted generations */ 542#ifdef CONFIG_LRU_GEN_STATS 543#define NR_HIST_GENS MAX_NR_GENS 544#else 545#define NR_HIST_GENS 1U 546#endif 547 548/* 549 * The youngest generation number is stored in max_seq for both anon and file 550 * types as they are aged on an equal footing. The oldest generation numbers are 551 * stored in min_seq[] separately for anon and file types so that they can be 552 * incremented independently. Ideally min_seq[] are kept in sync when both anon 553 * and file types are evictable. However, to adapt to situations like extreme 554 * swappiness, they are allowed to be out of sync by at most 555 * MAX_NR_GENS-MIN_NR_GENS-1. 556 * 557 * The number of pages in each generation is eventually consistent and therefore 558 * can be transiently negative when reset_batch_size() is pending. 559 */ 560struct lru_gen_folio { 561 /* the aging increments the youngest generation number */ 562 unsigned long max_seq; 563 /* the eviction increments the oldest generation numbers */ 564 unsigned long min_seq[ANON_AND_FILE]; 565 /* the birth time of each generation in jiffies */ 566 unsigned long timestamps[MAX_NR_GENS]; 567 /* the multi-gen LRU lists, lazily sorted on eviction */ 568 struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; 569 /* the multi-gen LRU sizes, eventually consistent */ 570 long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; 571 /* the exponential moving average of refaulted */ 572 unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS]; 573 /* the exponential moving average of evicted+protected */ 574 unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS]; 575 /* can only be modified under the LRU lock */ 576 unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; 577 /* can be modified without holding the LRU lock */ 578 atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; 579 atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; 580 /* whether the multi-gen LRU is enabled */ 581 bool enabled; 582 /* the memcg generation this lru_gen_folio belongs to */ 583 u8 gen; 584 /* the list segment this lru_gen_folio belongs to */ 585 u8 seg; 586 /* per-node lru_gen_folio list for global reclaim */ 587 struct hlist_nulls_node list; 588}; 589 590enum { 591 MM_LEAF_TOTAL, /* total leaf entries */ 592 MM_LEAF_YOUNG, /* young leaf entries */ 593 MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */ 594 MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */ 595 NR_MM_STATS 596}; 597 598/* double-buffering Bloom filters */ 599#define NR_BLOOM_FILTERS 2 600 601struct lru_gen_mm_state { 602 /* synced with max_seq after each iteration */ 603 unsigned long seq; 604 /* where the current iteration continues after */ 605 struct list_head *head; 606 /* where the last iteration ended before */ 607 struct list_head *tail; 608 /* Bloom filters flip after each iteration */ 609 unsigned long *filters[NR_BLOOM_FILTERS]; 610 /* the mm stats for debugging */ 611 unsigned long stats[NR_HIST_GENS][NR_MM_STATS]; 612}; 613 614struct lru_gen_mm_walk { 615 /* the lruvec under reclaim 
*/ 616 struct lruvec *lruvec; 617 /* max_seq from lru_gen_folio: can be out of date */ 618 unsigned long seq; 619 /* the next address within an mm to scan */ 620 unsigned long next_addr; 621 /* to batch promoted pages */ 622 int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; 623 /* to batch the mm stats */ 624 int mm_stats[NR_MM_STATS]; 625 /* total batched items */ 626 int batched; 627 int swappiness; 628 bool force_scan; 629}; 630 631/* 632 * For each node, memcgs are divided into two generations: the old and the 633 * young. For each generation, memcgs are randomly sharded into multiple bins 634 * to improve scalability. For each bin, the hlist_nulls is virtually divided 635 * into three segments: the head, the tail and the default. 636 * 637 * An onlining memcg is added to the tail of a random bin in the old generation. 638 * The eviction starts at the head of a random bin in the old generation. The 639 * per-node memcg generation counter, whose reminder (mod MEMCG_NR_GENS) indexes 640 * the old generation, is incremented when all its bins become empty. 641 * 642 * There are four operations: 643 * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its 644 * current generation (old or young) and updates its "seg" to "head"; 645 * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its 646 * current generation (old or young) and updates its "seg" to "tail"; 647 * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old 648 * generation, updates its "gen" to "old" and resets its "seg" to "default"; 649 * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the 650 * young generation, updates its "gen" to "young" and resets its "seg" to 651 * "default". 652 * 653 * The events that trigger the above operations are: 654 * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD; 655 * 2. The first attempt to reclaim a memcg below low, which triggers 656 * MEMCG_LRU_TAIL; 657 * 3. The first attempt to reclaim a memcg offlined or below reclaimable size 658 * threshold, which triggers MEMCG_LRU_TAIL; 659 * 4. The second attempt to reclaim a memcg offlined or below reclaimable size 660 * threshold, which triggers MEMCG_LRU_YOUNG; 661 * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG; 662 * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG; 663 * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD. 664 * 665 * Notes: 666 * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing 667 * of their max_seq counters ensures the eventual fairness to all eligible 668 * memcgs. For memcg reclaim, it still relies on mem_cgroup_iter(). 669 * 2. There are only two valid generations: old (seq) and young (seq+1). 670 * MEMCG_NR_GENS is set to three so that when reading the generation counter 671 * locklessly, a stale value (seq-1) does not wraparound to young. 
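 *
 * Illustrative indexing (a sketch based on the notes above): the old
 * generation lives at index seq % MEMCG_NR_GENS and the young generation at
 * (seq + 1) % MEMCG_NR_GENS; the third slot keeps a stale (seq - 1) reader
 * from landing on the young generation.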
672 */ 673#define MEMCG_NR_GENS 3 674#define MEMCG_NR_BINS 8 675 676struct lru_gen_memcg { 677 /* the per-node memcg generation counter */ 678 unsigned long seq; 679 /* each memcg has one lru_gen_folio per node */ 680 unsigned long nr_memcgs[MEMCG_NR_GENS]; 681 /* per-node lru_gen_folio list for global reclaim */ 682 struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS]; 683 /* protects the above */ 684 spinlock_t lock; 685}; 686 687void lru_gen_init_pgdat(struct pglist_data *pgdat); 688void lru_gen_init_lruvec(struct lruvec *lruvec); 689bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw, unsigned int nr); 690 691void lru_gen_init_memcg(struct mem_cgroup *memcg); 692void lru_gen_exit_memcg(struct mem_cgroup *memcg); 693void lru_gen_online_memcg(struct mem_cgroup *memcg); 694void lru_gen_offline_memcg(struct mem_cgroup *memcg); 695void lru_gen_release_memcg(struct mem_cgroup *memcg); 696void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid); 697 698#else /* !CONFIG_LRU_GEN */ 699 700static inline void lru_gen_init_pgdat(struct pglist_data *pgdat) 701{ 702} 703 704static inline void lru_gen_init_lruvec(struct lruvec *lruvec) 705{ 706} 707 708static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw, 709 unsigned int nr) 710{ 711 return false; 712} 713 714static inline void lru_gen_init_memcg(struct mem_cgroup *memcg) 715{ 716} 717 718static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg) 719{ 720} 721 722static inline void lru_gen_online_memcg(struct mem_cgroup *memcg) 723{ 724} 725 726static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg) 727{ 728} 729 730static inline void lru_gen_release_memcg(struct mem_cgroup *memcg) 731{ 732} 733 734static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) 735{ 736} 737 738#endif /* CONFIG_LRU_GEN */ 739 740struct lruvec { 741 struct list_head lists[NR_LRU_LISTS]; 742 /* per lruvec lru_lock for memcg */ 743 spinlock_t lru_lock; 744 /* 745 * These track the cost of reclaiming one LRU - file or anon - 746 * over the other. As the observed cost of reclaiming one LRU 747 * increases, the reclaim scan balance tips toward the other. 748 */ 749 unsigned long anon_cost; 750 unsigned long file_cost; 751 /* Non-resident age, driven by LRU movement */ 752 atomic_long_t nonresident_age; 753 /* Refaults at the time of last reclaim cycle */ 754 unsigned long refaults[ANON_AND_FILE]; 755 /* Various lruvec state flags (enum lruvec_flags) */ 756 unsigned long flags; 757#ifdef CONFIG_LRU_GEN 758 /* evictable pages divided into generations */ 759 struct lru_gen_folio lrugen; 760#ifdef CONFIG_LRU_GEN_WALKS_MMU 761 /* to concurrently iterate lru_gen_mm_list */ 762 struct lru_gen_mm_state mm_state; 763#endif 764#endif /* CONFIG_LRU_GEN */ 765#ifdef CONFIG_MEMCG 766 struct pglist_data *pgdat; 767#endif 768 struct zswap_lruvec_state zswap_lruvec_state; 769}; 770 771/* Isolate for asynchronous migration */ 772#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4) 773/* Isolate unevictable pages */ 774#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) 775 776/* LRU Isolation modes. */ 777typedef unsigned __bitwise isolate_mode_t; 778 779enum zone_watermarks { 780 WMARK_MIN, 781 WMARK_LOW, 782 WMARK_HIGH, 783 WMARK_PROMO, 784 NR_WMARK 785}; 786 787/* 788 * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists 789 * are added for THP. One PCP list is used by GPF_MOVABLE, and the other PCP list 790 * is used by GFP_UNMOVABLE and GFP_RECLAIMABLE. 
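 *
 * Worked arithmetic for the definitions below: with MIGRATE_PCPTYPES == 3 and
 * PAGE_ALLOC_COSTLY_ORDER == 3, NR_LOWORDER_PCP_LISTS == 3 * (3 + 1) == 12,
 * and with THP enabled NR_PCP_THP == 2, so NR_PCP_LISTS == 14.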
791 */ 792#ifdef CONFIG_TRANSPARENT_HUGEPAGE 793#define NR_PCP_THP 2 794#else 795#define NR_PCP_THP 0 796#endif 797#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1)) 798#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP) 799 800/* 801 * Flags used in pcp->flags field. 802 * 803 * PCPF_PREV_FREE_HIGH_ORDER: a high-order page is freed in the 804 * previous page freeing. To avoid to drain PCP for an accident 805 * high-order page freeing. 806 * 807 * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in PCP before 808 * draining PCP for consecutive high-order pages freeing without 809 * allocation if data cache slice of CPU is large enough. To reduce 810 * zone lock contention and keep cache-hot pages reusing. 811 */ 812#define PCPF_PREV_FREE_HIGH_ORDER BIT(0) 813#define PCPF_FREE_HIGH_BATCH BIT(1) 814 815struct per_cpu_pages { 816 spinlock_t lock; /* Protects lists field */ 817 int count; /* number of pages in the list */ 818 int high; /* high watermark, emptying needed */ 819 int high_min; /* min high watermark */ 820 int high_max; /* max high watermark */ 821 int batch; /* chunk size for buddy add/remove */ 822 u8 flags; /* protected by pcp->lock */ 823 u8 alloc_factor; /* batch scaling factor during allocate */ 824#ifdef CONFIG_NUMA 825 u8 expire; /* When 0, remote pagesets are drained */ 826#endif 827 short free_count; /* consecutive free count */ 828 829 /* Lists of pages, one per migrate type stored on the pcp-lists */ 830 struct list_head lists[NR_PCP_LISTS]; 831} ____cacheline_aligned_in_smp; 832 833struct per_cpu_zonestat { 834#ifdef CONFIG_SMP 835 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; 836 s8 stat_threshold; 837#endif 838#ifdef CONFIG_NUMA 839 /* 840 * Low priority inaccurate counters that are only folded 841 * on demand. Use a large type to avoid the overhead of 842 * folding during refresh_cpu_vm_stats. 843 */ 844 unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; 845#endif 846}; 847 848struct per_cpu_nodestat { 849 s8 stat_threshold; 850 s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS]; 851}; 852 853#endif /* !__GENERATING_BOUNDS.H */ 854 855enum zone_type { 856 /* 857 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able 858 * to DMA to all of the addressable memory (ZONE_NORMAL). 859 * On architectures where this area covers the whole 32 bit address 860 * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller 861 * DMA addressing constraints. This distinction is important as a 32bit 862 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit 863 * platforms may need both zones as they support peripherals with 864 * different DMA addressing limitations. 865 */ 866#ifdef CONFIG_ZONE_DMA 867 ZONE_DMA, 868#endif 869#ifdef CONFIG_ZONE_DMA32 870 ZONE_DMA32, 871#endif 872 /* 873 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be 874 * performed on pages in ZONE_NORMAL if the DMA devices support 875 * transfers to all addressable memory. 876 */ 877 ZONE_NORMAL, 878#ifdef CONFIG_HIGHMEM 879 /* 880 * A memory area that is only addressable by the kernel through 881 * mapping portions into its own address space. This is for example 882 * used by i386 to allow the kernel to address the memory beyond 883 * 900MB. The kernel will set up special mappings (page 884 * table entries on i386) for each page that the kernel needs to 885 * access. 
886 */ 887 ZONE_HIGHMEM, 888#endif 889 /* 890 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains 891 * movable pages with few exceptional cases described below. Main use 892 * cases for ZONE_MOVABLE are to make memory offlining/unplug more 893 * likely to succeed, and to locally limit unmovable allocations - e.g., 894 * to increase the number of THP/huge pages. Notable special cases are: 895 * 896 * 1. Pinned pages: (long-term) pinning of movable pages might 897 * essentially turn such pages unmovable. Therefore, we do not allow 898 * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and 899 * faulted, they come from the right zone right away. However, it is 900 * still possible that address space already has pages in 901 * ZONE_MOVABLE at the time when pages are pinned (i.e. user has 902 * touches that memory before pinning). In such case we migrate them 903 * to a different zone. When migration fails - pinning fails. 904 * 2. memblock allocations: kernelcore/movablecore setups might create 905 * situations where ZONE_MOVABLE contains unmovable allocations 906 * after boot. Memory offlining and allocations fail early. 907 * 3. Memory holes: kernelcore/movablecore setups might create very rare 908 * situations where ZONE_MOVABLE contains memory holes after boot, 909 * for example, if we have sections that are only partially 910 * populated. Memory offlining and allocations fail early. 911 * 4. PG_hwpoison pages: while poisoned pages can be skipped during 912 * memory offlining, such pages cannot be allocated. 913 * 5. Unmovable PG_offline pages: in paravirtualized environments, 914 * hotplugged memory blocks might only partially be managed by the 915 * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The 916 * parts not manged by the buddy are unmovable PG_offline pages. In 917 * some cases (virtio-mem), such pages can be skipped during 918 * memory offlining, however, cannot be moved/allocated. These 919 * techniques might use alloc_contig_range() to hide previously 920 * exposed pages from the buddy again (e.g., to implement some sort 921 * of memory unplug in virtio-mem). 922 * 6. ZERO_PAGE(0), kernelcore/movablecore setups might create 923 * situations where ZERO_PAGE(0) which is allocated differently 924 * on different platforms may end up in a movable zone. ZERO_PAGE(0) 925 * cannot be migrated. 926 * 7. Memory-hotplug: when using memmap_on_memory and onlining the 927 * memory to the MOVABLE zone, the vmemmap pages are also placed in 928 * such zone. Such pages cannot be really moved around as they are 929 * self-stored in the range, but they are treated as movable when 930 * the range they describe is about to be offlined. 931 * 932 * In general, no unmovable allocations that degrade memory offlining 933 * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range()) 934 * have to expect that migrating pages in ZONE_MOVABLE can fail (even 935 * if has_unmovable_pages() states that there are no unmovable pages, 936 * there can be false negatives). 
937 */ 938 ZONE_MOVABLE, 939#ifdef CONFIG_ZONE_DEVICE 940 ZONE_DEVICE, 941#endif 942 __MAX_NR_ZONES 943 944}; 945 946#ifndef __GENERATING_BOUNDS_H 947 948#define ASYNC_AND_SYNC 2 949 950struct zone { 951 /* Read-mostly fields */ 952 953 /* zone watermarks, access with *_wmark_pages(zone) macros */ 954 unsigned long _watermark[NR_WMARK]; 955 unsigned long watermark_boost; 956 957 unsigned long nr_reserved_highatomic; 958 unsigned long nr_free_highatomic; 959 960 /* 961 * We don't know if the memory that we're going to allocate will be 962 * freeable or/and it will be released eventually, so to avoid totally 963 * wasting several GB of ram we must reserve some of the lower zone 964 * memory (otherwise we risk to run OOM on the lower zones despite 965 * there being tons of freeable ram on the higher zones). This array is 966 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl 967 * changes. 968 */ 969 long lowmem_reserve[MAX_NR_ZONES]; 970 971#ifdef CONFIG_NUMA 972 int node; 973#endif 974 struct pglist_data *zone_pgdat; 975 struct per_cpu_pages __percpu *per_cpu_pageset; 976 struct per_cpu_zonestat __percpu *per_cpu_zonestats; 977 /* 978 * the high and batch values are copied to individual pagesets for 979 * faster access 980 */ 981 int pageset_high_min; 982 int pageset_high_max; 983 int pageset_batch; 984 985#ifndef CONFIG_SPARSEMEM 986 /* 987 * Flags for a pageblock_nr_pages block. See pageblock-flags.h. 988 * In SPARSEMEM, this map is stored in struct mem_section 989 */ 990 unsigned long *pageblock_flags; 991#endif /* CONFIG_SPARSEMEM */ 992 993 /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ 994 unsigned long zone_start_pfn; 995 996 /* 997 * spanned_pages is the total pages spanned by the zone, including 998 * holes, which is calculated as: 999 * spanned_pages = zone_end_pfn - zone_start_pfn; 1000 * 1001 * present_pages is physical pages existing within the zone, which 1002 * is calculated as: 1003 * present_pages = spanned_pages - absent_pages(pages in holes); 1004 * 1005 * present_early_pages is present pages existing within the zone 1006 * located on memory available since early boot, excluding hotplugged 1007 * memory. 1008 * 1009 * managed_pages is present pages managed by the buddy system, which 1010 * is calculated as (reserved_pages includes pages allocated by the 1011 * bootmem allocator): 1012 * managed_pages = present_pages - reserved_pages; 1013 * 1014 * cma pages is present pages that are assigned for CMA use 1015 * (MIGRATE_CMA). 1016 * 1017 * So present_pages may be used by memory hotplug or memory power 1018 * management logic to figure out unmanaged pages by checking 1019 * (present_pages - managed_pages). And managed_pages should be used 1020 * by page allocator and vm scanner to calculate all kinds of watermarks 1021 * and thresholds. 1022 * 1023 * Locking rules: 1024 * 1025 * zone_start_pfn and spanned_pages are protected by span_seqlock. 1026 * It is a seqlock because it has to be read outside of zone->lock, 1027 * and it is done in the main allocator path. But, it is written 1028 * quite infrequently. 1029 * 1030 * The span_seq lock is declared along with zone->lock because it is 1031 * frequently read in proximity to zone->lock. It's good to 1032 * give them a chance of being in the same cacheline. 1033 * 1034 * Write access to present_pages at runtime should be protected by 1035 * mem_hotplug_begin/done(). Any reader who can't tolerant drift of 1036 * present_pages should use get_online_mems() to get a stable value. 
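 *
 * Worked example (illustrative numbers only): a zone spanning 1,048,576 pfns
 * with 16,384 pfns of holes and 8,192 reserved pages has
 * spanned_pages == 1048576, present_pages == 1032192 and
 * managed_pages == 1024000; spanned_pages >= present_pages >= managed_pages
 * always holds.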
1037 */ 1038 atomic_long_t managed_pages; 1039 unsigned long spanned_pages; 1040 unsigned long present_pages; 1041#if defined(CONFIG_MEMORY_HOTPLUG) 1042 unsigned long present_early_pages; 1043#endif 1044#ifdef CONFIG_CMA 1045 unsigned long cma_pages; 1046#endif 1047 1048 const char *name; 1049 1050#ifdef CONFIG_MEMORY_ISOLATION 1051 /* 1052 * Number of isolated pageblock. It is used to solve incorrect 1053 * freepage counting problem due to racy retrieving migratetype 1054 * of pageblock. Protected by zone->lock. 1055 */ 1056 unsigned long nr_isolate_pageblock; 1057#endif 1058 1059#ifdef CONFIG_MEMORY_HOTPLUG 1060 /* see spanned/present_pages for more description */ 1061 seqlock_t span_seqlock; 1062#endif 1063 1064 int initialized; 1065 1066 /* Write-intensive fields used from the page allocator */ 1067 CACHELINE_PADDING(_pad1_); 1068 1069 /* free areas of different sizes */ 1070 struct free_area free_area[NR_PAGE_ORDERS]; 1071 1072#ifdef CONFIG_UNACCEPTED_MEMORY 1073 /* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */ 1074 struct list_head unaccepted_pages; 1075 1076 /* To be called once the last page in the zone is accepted */ 1077 struct work_struct unaccepted_cleanup; 1078#endif 1079 1080 /* zone flags, see below */ 1081 unsigned long flags; 1082 1083 /* Primarily protects free_area */ 1084 spinlock_t lock; 1085 1086 /* Pages to be freed when next trylock succeeds */ 1087 struct llist_head trylock_free_pages; 1088 1089 /* Write-intensive fields used by compaction and vmstats. */ 1090 CACHELINE_PADDING(_pad2_); 1091 1092 /* 1093 * When free pages are below this point, additional steps are taken 1094 * when reading the number of free pages to avoid per-cpu counter 1095 * drift allowing watermarks to be breached 1096 */ 1097 unsigned long percpu_drift_mark; 1098 1099#if defined CONFIG_COMPACTION || defined CONFIG_CMA 1100 /* pfn where compaction free scanner should start */ 1101 unsigned long compact_cached_free_pfn; 1102 /* pfn where compaction migration scanner should start */ 1103 unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC]; 1104 unsigned long compact_init_migrate_pfn; 1105 unsigned long compact_init_free_pfn; 1106#endif 1107 1108#ifdef CONFIG_COMPACTION 1109 /* 1110 * On compaction failure, 1<<compact_defer_shift compactions 1111 * are skipped before trying again. The number attempted since 1112 * last failure is tracked with compact_considered. 1113 * compact_order_failed is the minimum compaction failed order. 1114 */ 1115 unsigned int compact_considered; 1116 unsigned int compact_defer_shift; 1117 int compact_order_failed; 1118#endif 1119 1120#if defined CONFIG_COMPACTION || defined CONFIG_CMA 1121 /* Set to true when the PG_migrate_skip bits should be cleared */ 1122 bool compact_blockskip_flush; 1123#endif 1124 1125 bool contiguous; 1126 1127 CACHELINE_PADDING(_pad3_); 1128 /* Zone statistics */ 1129 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; 1130 atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; 1131#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP 1132 struct page *vmemmap_tails[NR_VMEMMAP_TAILS]; 1133#endif 1134} ____cacheline_internodealigned_in_smp; 1135 1136enum pgdat_flags { 1137 PGDAT_WRITEBACK, /* reclaim scanning has recently found 1138 * many pages under writeback 1139 */ 1140 PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ 1141}; 1142 1143enum zone_flags { 1144 ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks. 1145 * Cleared when kswapd is woken. 1146 */ 1147 ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. 
*/ 1148 ZONE_BELOW_HIGH, /* zone is below high watermark. */ 1149}; 1150 1151static inline unsigned long wmark_pages(const struct zone *z, 1152 enum zone_watermarks w) 1153{ 1154 return z->_watermark[w] + z->watermark_boost; 1155} 1156 1157static inline unsigned long min_wmark_pages(const struct zone *z) 1158{ 1159 return wmark_pages(z, WMARK_MIN); 1160} 1161 1162static inline unsigned long low_wmark_pages(const struct zone *z) 1163{ 1164 return wmark_pages(z, WMARK_LOW); 1165} 1166 1167static inline unsigned long high_wmark_pages(const struct zone *z) 1168{ 1169 return wmark_pages(z, WMARK_HIGH); 1170} 1171 1172static inline unsigned long promo_wmark_pages(const struct zone *z) 1173{ 1174 return wmark_pages(z, WMARK_PROMO); 1175} 1176 1177static inline unsigned long zone_managed_pages(const struct zone *zone) 1178{ 1179 return (unsigned long)atomic_long_read(&zone->managed_pages); 1180} 1181 1182static inline unsigned long zone_cma_pages(struct zone *zone) 1183{ 1184#ifdef CONFIG_CMA 1185 return zone->cma_pages; 1186#else 1187 return 0; 1188#endif 1189} 1190 1191static inline unsigned long zone_end_pfn(const struct zone *zone) 1192{ 1193 return zone->zone_start_pfn + zone->spanned_pages; 1194} 1195 1196static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) 1197{ 1198 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); 1199} 1200 1201static inline bool zone_is_initialized(const struct zone *zone) 1202{ 1203 return zone->initialized; 1204} 1205 1206static inline bool zone_is_empty(const struct zone *zone) 1207{ 1208 return zone->spanned_pages == 0; 1209} 1210 1211#ifndef BUILD_VDSO32_64 1212/* 1213 * The zone field is never updated after free_area_init_core() 1214 * sets it, so none of the operations on it need to be atomic. 1215 */ 1216 1217/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */ 1218#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) 1219#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) 1220#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) 1221#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) 1222#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) 1223#define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH) 1224#define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH) 1225 1226/* 1227 * Define the bit shifts to access each section. For non-existent 1228 * sections we define the shift as 0; that plus a 0 mask ensures 1229 * the compiler will optimise away reference to them. 1230 */ 1231#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) 1232#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) 1233#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) 1234#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) 1235#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) 1236 1237/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ 1238#ifdef NODE_NOT_IN_PAGE_FLAGS 1239#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) 1240#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \ 1241 SECTIONS_PGOFF : ZONES_PGOFF) 1242#else 1243#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) 1244#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? 
\ 1245 NODES_PGOFF : ZONES_PGOFF) 1246#endif 1247 1248#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) 1249 1250#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) 1251#define NODES_MASK ((1UL << NODES_WIDTH) - 1) 1252#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) 1253#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) 1254#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) 1255#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) 1256 1257static inline enum zone_type memdesc_zonenum(memdesc_flags_t flags) 1258{ 1259 ASSERT_EXCLUSIVE_BITS(flags.f, ZONES_MASK << ZONES_PGSHIFT); 1260 return (flags.f >> ZONES_PGSHIFT) & ZONES_MASK; 1261} 1262 1263static inline enum zone_type page_zonenum(const struct page *page) 1264{ 1265 return memdesc_zonenum(page->flags); 1266} 1267 1268static inline enum zone_type folio_zonenum(const struct folio *folio) 1269{ 1270 return memdesc_zonenum(folio->flags); 1271} 1272 1273#ifdef CONFIG_ZONE_DEVICE 1274static inline bool memdesc_is_zone_device(memdesc_flags_t mdf) 1275{ 1276 return memdesc_zonenum(mdf) == ZONE_DEVICE; 1277} 1278 1279static inline struct dev_pagemap *page_pgmap(const struct page *page) 1280{ 1281 VM_WARN_ON_ONCE_PAGE(!memdesc_is_zone_device(page->flags), page); 1282 return page_folio(page)->pgmap; 1283} 1284 1285/* 1286 * Consecutive zone device pages should not be merged into the same sgl 1287 * or bvec segment with other types of pages or if they belong to different 1288 * pgmaps. Otherwise getting the pgmap of a given segment is not possible 1289 * without scanning the entire segment. This helper returns true either if 1290 * both pages are not zone device pages or both pages are zone device pages 1291 * with the same pgmap. 1292 */ 1293static inline bool zone_device_pages_have_same_pgmap(const struct page *a, 1294 const struct page *b) 1295{ 1296 if (memdesc_is_zone_device(a->flags) != memdesc_is_zone_device(b->flags)) 1297 return false; 1298 if (!memdesc_is_zone_device(a->flags)) 1299 return true; 1300 return page_pgmap(a) == page_pgmap(b); 1301} 1302 1303extern void memmap_init_zone_device(struct zone *, unsigned long, 1304 unsigned long, struct dev_pagemap *); 1305#else 1306static inline bool memdesc_is_zone_device(memdesc_flags_t mdf) 1307{ 1308 return false; 1309} 1310static inline bool zone_device_pages_have_same_pgmap(const struct page *a, 1311 const struct page *b) 1312{ 1313 return true; 1314} 1315static inline struct dev_pagemap *page_pgmap(const struct page *page) 1316{ 1317 return NULL; 1318} 1319#endif 1320 1321static inline bool is_zone_device_page(const struct page *page) 1322{ 1323 return memdesc_is_zone_device(page->flags); 1324} 1325 1326static inline bool folio_is_zone_device(const struct folio *folio) 1327{ 1328 return memdesc_is_zone_device(folio->flags); 1329} 1330 1331static inline bool is_zone_movable_page(const struct page *page) 1332{ 1333 return page_zonenum(page) == ZONE_MOVABLE; 1334} 1335 1336static inline bool folio_is_zone_movable(const struct folio *folio) 1337{ 1338 return folio_zonenum(folio) == ZONE_MOVABLE; 1339} 1340#endif 1341 1342/* 1343 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty 1344 * intersection with the given zone 1345 */ 1346static inline bool zone_intersects(const struct zone *zone, 1347 unsigned long start_pfn, unsigned long nr_pages) 1348{ 1349 if (zone_is_empty(zone)) 1350 return false; 1351 if (start_pfn >= zone_end_pfn(zone) || 1352 start_pfn + nr_pages <= zone->zone_start_pfn) 1353 return false; 1354 1355 return true; 1356} 1357 1358/* 1359 * The 
"priority" of VM scanning is how much of the queues we will scan in one 1360 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the 1361 * queues ("queue_length >> 12") during an aging round. 1362 */ 1363#define DEF_PRIORITY 12 1364 1365/* Maximum number of zones on a zonelist */ 1366#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) 1367 1368enum { 1369 ZONELIST_FALLBACK, /* zonelist with fallback */ 1370#ifdef CONFIG_NUMA 1371 /* 1372 * The NUMA zonelists are doubled because we need zonelists that 1373 * restrict the allocations to a single node for __GFP_THISNODE. 1374 */ 1375 ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */ 1376#endif 1377 MAX_ZONELISTS 1378}; 1379 1380/* 1381 * This struct contains information about a zone in a zonelist. It is stored 1382 * here to avoid dereferences into large structures and lookups of tables 1383 */ 1384struct zoneref { 1385 struct zone *zone; /* Pointer to actual zone */ 1386 int zone_idx; /* zone_idx(zoneref->zone) */ 1387}; 1388 1389/* 1390 * One allocation request operates on a zonelist. A zonelist 1391 * is a list of zones, the first one is the 'goal' of the 1392 * allocation, the other zones are fallback zones, in decreasing 1393 * priority. 1394 * 1395 * To speed the reading of the zonelist, the zonerefs contain the zone index 1396 * of the entry being read. Helper functions to access information given 1397 * a struct zoneref are 1398 * 1399 * zonelist_zone() - Return the struct zone * for an entry in _zonerefs 1400 * zonelist_zone_idx() - Return the index of the zone for an entry 1401 * zonelist_node_idx() - Return the index of the node for an entry 1402 */ 1403struct zonelist { 1404 struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; 1405}; 1406 1407/* 1408 * The array of struct pages for flatmem. 1409 * It must be declared for SPARSEMEM as well because there are configurations 1410 * that rely on that. 1411 */ 1412extern struct page *mem_map; 1413 1414#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1415struct deferred_split { 1416 spinlock_t split_queue_lock; 1417 struct list_head split_queue; 1418 unsigned long split_queue_len; 1419}; 1420#endif 1421 1422#ifdef CONFIG_MEMORY_FAILURE 1423/* 1424 * Per NUMA node memory failure handling statistics. 1425 */ 1426struct memory_failure_stats { 1427 /* 1428 * Number of raw pages poisoned. 1429 * Cases not accounted: memory outside kernel control, offline page, 1430 * arch-specific memory_failure (SGX), hwpoison_filter() filtered 1431 * error events, and unpoison actions from hwpoison_unpoison. 1432 */ 1433 unsigned long total; 1434 /* 1435 * Recovery results of poisoned raw pages handled by memory_failure, 1436 * in sync with mf_result. 1437 * total = ignored + failed + delayed + recovered. 1438 * total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted. 1439 */ 1440 unsigned long ignored; 1441 unsigned long failed; 1442 unsigned long delayed; 1443 unsigned long recovered; 1444}; 1445#endif 1446 1447/* 1448 * On NUMA machines, each NUMA node would have a pg_data_t to describe 1449 * it's memory layout. On UMA machines there is a single pglist_data which 1450 * describes the whole memory. 1451 * 1452 * Memory statistics and page replacement data structures are maintained on a 1453 * per-zone basis. 1454 */ 1455typedef struct pglist_data { 1456 /* 1457 * node_zones contains just the zones for THIS node. Not all of the 1458 * zones may be populated, but it is the full list. 
It is referenced by 1459 * this node's node_zonelists as well as other node's node_zonelists. 1460 */ 1461 struct zone node_zones[MAX_NR_ZONES]; 1462 1463 /* 1464 * node_zonelists contains references to all zones in all nodes. 1465 * Generally the first zones will be references to this node's 1466 * node_zones. 1467 */ 1468 struct zonelist node_zonelists[MAX_ZONELISTS]; 1469 1470 int nr_zones; /* number of populated zones in this node */ 1471#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */ 1472 struct page *node_mem_map; 1473#ifdef CONFIG_PAGE_EXTENSION 1474 struct page_ext *node_page_ext; 1475#endif 1476#endif 1477#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) 1478 /* 1479 * Must be held any time you expect node_start_pfn, 1480 * node_present_pages, node_spanned_pages or nr_zones to stay constant. 1481 * Also synchronizes pgdat->first_deferred_pfn during deferred page 1482 * init. 1483 * 1484 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to 1485 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG 1486 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT. 1487 * 1488 * Nests above zone->lock and zone->span_seqlock 1489 */ 1490 spinlock_t node_size_lock; 1491#endif 1492 unsigned long node_start_pfn; 1493 unsigned long node_present_pages; /* total number of physical pages */ 1494 unsigned long node_spanned_pages; /* total size of physical page 1495 range, including holes */ 1496 int node_id; 1497 wait_queue_head_t kswapd_wait; 1498 wait_queue_head_t pfmemalloc_wait; 1499 1500 /* workqueues for throttling reclaim for different reasons. */ 1501 wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE]; 1502 1503 atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */ 1504 unsigned long nr_reclaim_start; /* nr pages written while throttled 1505 * when throttling started. */ 1506#ifdef CONFIG_MEMORY_HOTPLUG 1507 struct mutex kswapd_lock; 1508#endif 1509 struct task_struct *kswapd; /* Protected by kswapd_lock */ 1510 int kswapd_order; 1511 enum zone_type kswapd_highest_zoneidx; 1512 1513 atomic_t kswapd_failures; /* Number of 'reclaimed == 0' runs */ 1514 1515#ifdef CONFIG_COMPACTION 1516 int kcompactd_max_order; 1517 enum zone_type kcompactd_highest_zoneidx; 1518 wait_queue_head_t kcompactd_wait; 1519 struct task_struct *kcompactd; 1520 bool proactive_compact_trigger; 1521#endif 1522 /* 1523 * This is a per-node reserve of pages that are not available 1524 * to userspace allocations. 1525 */ 1526 unsigned long totalreserve_pages; 1527 1528#ifdef CONFIG_NUMA 1529 /* 1530 * node reclaim becomes active if more unmapped pages exist. 1531 */ 1532 unsigned long min_unmapped_pages; 1533 unsigned long min_slab_pages; 1534#endif /* CONFIG_NUMA */ 1535 1536 /* Write-intensive fields used by page reclaim */ 1537 CACHELINE_PADDING(_pad1_); 1538 1539#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1540 /* 1541 * If memory initialisation on large machines is deferred then this 1542 * is the first PFN that needs to be initialised. 
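 *
 * As background (not stated here): with deferred initialisation, only struct
 * pages up to first_deferred_pfn are set up during early boot; the remainder
 * are initialised later by per-node "pgdatinit" kthreads.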
1543 */ 1544 unsigned long first_deferred_pfn; 1545#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 1546 1547#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1548 struct deferred_split deferred_split_queue; 1549#endif 1550 1551#ifdef CONFIG_NUMA_BALANCING 1552 /* start time in ms of current promote rate limit period */ 1553 unsigned int nbp_rl_start; 1554 /* number of promote candidate pages at start time of current rate limit period */ 1555 unsigned long nbp_rl_nr_cand; 1556 /* promote threshold in ms */ 1557 unsigned int nbp_threshold; 1558 /* start time in ms of current promote threshold adjustment period */ 1559 unsigned int nbp_th_start; 1560 /* 1561 * number of promote candidate pages at start time of current promote 1562 * threshold adjustment period 1563 */ 1564 unsigned long nbp_th_nr_cand; 1565#endif 1566 /* Fields commonly accessed by the page reclaim scanner */ 1567 1568 /* 1569 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED. 1570 * 1571 * Use mem_cgroup_lruvec() to look up lruvecs. 1572 */ 1573 struct lruvec __lruvec; 1574 1575 unsigned long flags; 1576 1577#ifdef CONFIG_LRU_GEN 1578 /* kswap mm walk data */ 1579 struct lru_gen_mm_walk mm_walk; 1580 /* lru_gen_folio list */ 1581 struct lru_gen_memcg memcg_lru; 1582#endif 1583 1584 CACHELINE_PADDING(_pad2_); 1585 1586 /* Per-node vmstats */ 1587 struct per_cpu_nodestat __percpu *per_cpu_nodestats; 1588 atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS]; 1589#ifdef CONFIG_NUMA 1590 struct memory_tier __rcu *memtier; 1591#endif 1592#ifdef CONFIG_MEMORY_FAILURE 1593 struct memory_failure_stats mf_stats; 1594#endif 1595} pg_data_t; 1596 1597#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) 1598#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) 1599 1600#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) 1601#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) 1602 1603static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) 1604{ 1605 return pgdat->node_start_pfn + pgdat->node_spanned_pages; 1606} 1607 1608#include <linux/memory_hotplug.h> 1609 1610void build_all_zonelists(pg_data_t *pgdat); 1611bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 1612 int highest_zoneidx, unsigned int alloc_flags, 1613 long free_pages); 1614bool zone_watermark_ok(struct zone *z, unsigned int order, 1615 unsigned long mark, int highest_zoneidx, 1616 unsigned int alloc_flags); 1617 1618enum kswapd_clear_hopeless_reason { 1619 KSWAPD_CLEAR_HOPELESS_OTHER = 0, 1620 KSWAPD_CLEAR_HOPELESS_KSWAPD, 1621 KSWAPD_CLEAR_HOPELESS_DIRECT, 1622 KSWAPD_CLEAR_HOPELESS_PCP, 1623}; 1624 1625void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order, 1626 enum zone_type highest_zoneidx); 1627void kswapd_try_clear_hopeless(struct pglist_data *pgdat, 1628 unsigned int order, int highest_zoneidx); 1629void kswapd_clear_hopeless(pg_data_t *pgdat, enum kswapd_clear_hopeless_reason reason); 1630bool kswapd_test_hopeless(pg_data_t *pgdat); 1631 1632/* 1633 * Memory initialization context, use to differentiate memory added by 1634 * the platform statically or via memory hotplug interface. 
1635 */ 1636enum meminit_context { 1637 MEMINIT_EARLY, 1638 MEMINIT_HOTPLUG, 1639}; 1640 1641extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, 1642 unsigned long size); 1643 1644extern void lruvec_init(struct lruvec *lruvec); 1645 1646static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) 1647{ 1648#ifdef CONFIG_MEMCG 1649 return lruvec->pgdat; 1650#else 1651 return container_of(lruvec, struct pglist_data, __lruvec); 1652#endif 1653} 1654 1655#ifdef CONFIG_HAVE_MEMORYLESS_NODES 1656int local_memory_node(int node_id); 1657#else 1658static inline int local_memory_node(int node_id) { return node_id; }; 1659#endif 1660 1661/* 1662 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. 1663 */ 1664#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) 1665 1666#ifdef CONFIG_ZONE_DEVICE 1667static inline bool zone_is_zone_device(const struct zone *zone) 1668{ 1669 return zone_idx(zone) == ZONE_DEVICE; 1670} 1671#else 1672static inline bool zone_is_zone_device(const struct zone *zone) 1673{ 1674 return false; 1675} 1676#endif 1677 1678/* 1679 * Returns true if a zone has pages managed by the buddy allocator. 1680 * All the reclaim decisions have to use this function rather than 1681 * populated_zone(). If the whole zone is reserved then we can easily 1682 * end up with populated_zone() && !managed_zone(). 1683 */ 1684static inline bool managed_zone(const struct zone *zone) 1685{ 1686 return zone_managed_pages(zone); 1687} 1688 1689/* Returns true if a zone has memory */ 1690static inline bool populated_zone(const struct zone *zone) 1691{ 1692 return zone->present_pages; 1693} 1694 1695#ifdef CONFIG_NUMA 1696static inline int zone_to_nid(const struct zone *zone) 1697{ 1698 return zone->node; 1699} 1700 1701static inline void zone_set_nid(struct zone *zone, int nid) 1702{ 1703 zone->node = nid; 1704} 1705#else 1706static inline int zone_to_nid(const struct zone *zone) 1707{ 1708 return 0; 1709} 1710 1711static inline void zone_set_nid(struct zone *zone, int nid) {} 1712#endif 1713 1714extern int movable_zone; 1715 1716static inline int is_highmem_idx(enum zone_type idx) 1717{ 1718#ifdef CONFIG_HIGHMEM 1719 return (idx == ZONE_HIGHMEM || 1720 (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM)); 1721#else 1722 return 0; 1723#endif 1724} 1725 1726/** 1727 * is_highmem - helper function to quickly check if a struct zone is a 1728 * highmem zone or not. This is an attempt to keep references 1729 * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. 
1730 * @zone: pointer to struct zone variable 1731 * Return: 1 for a highmem zone, 0 otherwise 1732 */ 1733static inline int is_highmem(const struct zone *zone) 1734{ 1735 return is_highmem_idx(zone_idx(zone)); 1736} 1737 1738bool has_managed_zone(enum zone_type zone); 1739static inline bool has_managed_dma(void) 1740{ 1741#ifdef CONFIG_ZONE_DMA 1742 return has_managed_zone(ZONE_DMA); 1743#else 1744 return false; 1745#endif 1746} 1747 1748 1749#ifndef CONFIG_NUMA 1750 1751extern struct pglist_data contig_page_data; 1752static inline struct pglist_data *NODE_DATA(int nid) 1753{ 1754 return &contig_page_data; 1755} 1756 1757#else /* CONFIG_NUMA */ 1758 1759#include <asm/mmzone.h> 1760 1761#endif /* !CONFIG_NUMA */ 1762 1763extern struct pglist_data *first_online_pgdat(void); 1764extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); 1765extern struct zone *next_zone(struct zone *zone); 1766 1767/** 1768 * for_each_online_pgdat - helper macro to iterate over all online nodes 1769 * @pgdat: pointer to a pg_data_t variable 1770 */ 1771#define for_each_online_pgdat(pgdat) \ 1772 for (pgdat = first_online_pgdat(); \ 1773 pgdat; \ 1774 pgdat = next_online_pgdat(pgdat)) 1775/** 1776 * for_each_zone - helper macro to iterate over all memory zones 1777 * @zone: pointer to struct zone variable 1778 * 1779 * The user only needs to declare the zone variable, for_each_zone 1780 * fills it in. 1781 */ 1782#define for_each_zone(zone) \ 1783 for (zone = (first_online_pgdat())->node_zones; \ 1784 zone; \ 1785 zone = next_zone(zone)) 1786 1787#define for_each_populated_zone(zone) \ 1788 for (zone = (first_online_pgdat())->node_zones; \ 1789 zone; \ 1790 zone = next_zone(zone)) \ 1791 if (!populated_zone(zone)) \ 1792 ; /* do nothing */ \ 1793 else 1794 1795static inline struct zone *zonelist_zone(struct zoneref *zoneref) 1796{ 1797 return zoneref->zone; 1798} 1799 1800static inline int zonelist_zone_idx(const struct zoneref *zoneref) 1801{ 1802 return zoneref->zone_idx; 1803} 1804 1805static inline int zonelist_node_idx(const struct zoneref *zoneref) 1806{ 1807 return zone_to_nid(zoneref->zone); 1808} 1809 1810struct zoneref *__next_zones_zonelist(struct zoneref *z, 1811 enum zone_type highest_zoneidx, 1812 nodemask_t *nodes); 1813 1814/** 1815 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point 1816 * @z: The cursor used as a starting point for the search 1817 * @highest_zoneidx: The zone index of the highest zone to return 1818 * @nodes: An optional nodemask to filter the zonelist with 1819 * 1820 * This function returns the next zone at or below a given zone index that is 1821 * within the allowed nodemask using a cursor as the starting point for the 1822 * search. The zoneref returned is a cursor that represents the current zone 1823 * being examined. It should be advanced by one before calling 1824 * next_zones_zonelist again. 
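 *
 * Illustrative sketch of the resulting usage pattern (examine() is a
 * hypothetical callback; real callers normally use the
 * for_each_zone_zonelist*() macros below):
 *
 *	struct zoneref *z = first_zones_zonelist(zonelist, highidx, NULL);
 *
 *	while (zonelist_zone(z)) {
 *		examine(zonelist_zone(z));
 *		z = next_zones_zonelist(++z, highidx, NULL);
 *	}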
1825 *
1826 * Return: the next zone at or below highest_zoneidx within the allowed
1827 * nodemask using a cursor within a zonelist as a starting point
1828 */
1829static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
1830 enum zone_type highest_zoneidx,
1831 nodemask_t *nodes)
1832{
1833 if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
1834 return z;
1835 return __next_zones_zonelist(z, highest_zoneidx, nodes);
1836}
1837
1838/**
1839 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
1840 * @zonelist: The zonelist to search for a suitable zone
1841 * @highest_zoneidx: The zone index of the highest zone to return
1842 * @nodes: An optional nodemask to filter the zonelist with
1843 *
1844 * This function returns the first zone at or below a given zone index that is
1845 * within the allowed nodemask. The zoneref returned is a cursor that can be
1846 * used to iterate the zonelist with next_zones_zonelist by advancing it by
1847 * one before calling.
1848 *
1849 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
1850 * never NULL). This may happen either genuinely, or due to concurrent nodemask
1851 * update due to cpuset modification.
1852 *
1853 * Return: Zoneref pointer for the first suitable zone found
1854 */
1855static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
1856 enum zone_type highest_zoneidx,
1857 nodemask_t *nodes)
1858{
1859 return next_zones_zonelist(zonelist->_zonerefs,
1860 highest_zoneidx, nodes);
1861}
1862
1863/**
1864 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
1865 * @zone: The current zone in the iterator
1866 * @z: The current pointer within zonelist->_zonerefs being iterated
1867 * @zlist: The zonelist being iterated
1868 * @highidx: The zone index of the highest zone to return
1869 * @nodemask: Nodemask allowed by the allocator
1870 *
1871 * This iterator iterates through all zones at or below a given zone index and
1872 * within a given nodemask.
1873 */
1874#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
1875 for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
1876 zone; \
1877 z = next_zones_zonelist(++z, highidx, nodemask), \
1878 zone = zonelist_zone(z))
1879
1880#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
1881 for (zone = zonelist_zone(z); \
1882 zone; \
1883 z = next_zones_zonelist(++z, highidx, nodemask), \
1884 zone = zonelist_zone(z))
1885
1886
1887/**
1888 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
1889 * @zone: The current zone in the iterator
1890 * @z: The current pointer within zonelist->_zonerefs being iterated
1891 * @zlist: The zonelist being iterated
1892 * @highidx: The zone index of the highest zone to return
1893 *
1894 * This iterator iterates through all zones at or below a given zone index.
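 *
 * Illustrative sketch, assuming a zonelist obtained elsewhere (e.g. via
 * node_zonelist() from gfp.h):
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, ZONE_NORMAL)
 *		pr_info("candidate zone %s\n", zone->name);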
1895 */
1896#define for_each_zone_zonelist(zone, z, zlist, highidx) \
1897 for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
1898
1899/* Whether the 'nodes' are all movable nodes */
1900static inline bool movable_only_nodes(nodemask_t *nodes)
1901{
1902 struct zonelist *zonelist;
1903 struct zoneref *z;
1904 int nid;
1905
1906 if (nodes_empty(*nodes))
1907 return false;
1908
1909 /*
1910 * We can choose an arbitrary node from the nodemask to get a
1911 * zonelist as they are interlinked. We just need to find
1912 * at least one zone that can satisfy kernel allocations.
1913 */
1914 nid = first_node(*nodes);
1915 zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
1916 z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
1917 return !zonelist_zone(z);
1918}
1919
1920
1921#ifdef CONFIG_SPARSEMEM
1922#include <asm/sparsemem.h>
1923#endif
1924
1925#ifdef CONFIG_FLATMEM
1926#define pfn_to_nid(pfn) (0)
1927#endif
1928
1929#ifdef CONFIG_SPARSEMEM
1930
1931/*
1932 * PA_SECTION_SHIFT physical address to/from section number
1933 * PFN_SECTION_SHIFT pfn to/from section number
1934 */
1935#define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
1936#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
1937
1938#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)
1939
1940#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
1941#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
1942
1943#define SECTION_BLOCKFLAGS_BITS \
1944 ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
1945
1946#if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
1947#error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE
1948#endif
1949
1950static inline unsigned long pfn_to_section_nr(unsigned long pfn)
1951{
1952 return pfn >> PFN_SECTION_SHIFT;
1953}
1954static inline unsigned long section_nr_to_pfn(unsigned long sec)
1955{
1956 return sec << PFN_SECTION_SHIFT;
1957}
1958
1959#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
1960#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
1961
1962#define SUBSECTION_SHIFT 21
1963#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)
1964
1965#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
1966#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
1967#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
1968
1969#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
1970#error Subsection size exceeds section size
1971#else
1972#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
1973#endif
1974
1975#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
1976#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
1977
1978struct mem_section_usage {
1979 struct rcu_head rcu;
1980#ifdef CONFIG_SPARSEMEM_VMEMMAP
1981 DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
1982#endif
1983 /* See declaration of similar field in struct zone */
1984 unsigned long pageblock_flags[0];
1985};
1986
1987struct page;
1988struct page_ext;
1989struct mem_section {
1990 /*
1991 * This is, logically, a pointer to an array of struct
1992 * pages. However, it is stored with some other magic.
1993 * (see sparse_init_one_section())
1994 *
1995 * Additionally, during early boot we encode the node id of
1996 * the section here to guide allocation.
1997 * (see sparse.c::memory_present())
1998 *
1999 * Making it a UL at least makes someone do a cast
2000 * before using it wrong.
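 *
 * Illustrative note: because the stored value is biased so that adding a
 * pfn indexes the right struct page, the generic sparsemem pfn_to_page()
 * (see asm-generic/memory_model.h) essentially computes
 *
 *	__section_mem_map_addr(__pfn_to_section(pfn)) + pfn
 *
 * with the flag bits below masked off by __section_mem_map_addr().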
2001 */ 2002 unsigned long section_mem_map; 2003 2004 struct mem_section_usage *usage; 2005#ifdef CONFIG_PAGE_EXTENSION 2006 /* 2007 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use 2008 * section. (see page_ext.h about this.) 2009 */ 2010 struct page_ext *page_ext; 2011 unsigned long pad; 2012#endif 2013 /* 2014 * WARNING: mem_section must be a power-of-2 in size for the 2015 * calculation and use of SECTION_ROOT_MASK to make sense. 2016 */ 2017}; 2018 2019#ifdef CONFIG_SPARSEMEM_EXTREME 2020#define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section)) 2021#else 2022#define SECTIONS_PER_ROOT 1 2023#endif 2024 2025#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT) 2026#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT) 2027#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) 2028 2029#ifdef CONFIG_SPARSEMEM_EXTREME 2030extern struct mem_section **mem_section; 2031#else 2032extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; 2033#endif 2034 2035static inline unsigned long *section_to_usemap(struct mem_section *ms) 2036{ 2037 return ms->usage->pageblock_flags; 2038} 2039 2040static inline struct mem_section *__nr_to_section(unsigned long nr) 2041{ 2042 unsigned long root = SECTION_NR_TO_ROOT(nr); 2043 2044 if (unlikely(root >= NR_SECTION_ROOTS)) 2045 return NULL; 2046 2047#ifdef CONFIG_SPARSEMEM_EXTREME 2048 if (!mem_section || !mem_section[root]) 2049 return NULL; 2050#endif 2051 return &mem_section[root][nr & SECTION_ROOT_MASK]; 2052} 2053extern size_t mem_section_usage_size(void); 2054 2055/* 2056 * We use the lower bits of the mem_map pointer to store 2057 * a little bit of information. The pointer is calculated 2058 * as mem_map - section_nr_to_pfn(pnum). The result is 2059 * aligned to the minimum alignment of the two values: 2060 * 1. All mem_map arrays are page-aligned. 2061 * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT 2062 * lowest bits. PFN_SECTION_SHIFT is arch-specific 2063 * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the 2064 * worst combination is powerpc with 256k pages, 2065 * which results in PFN_SECTION_SHIFT equal 6. 2066 * To sum it up, at least 6 bits are available on all architectures. 2067 * However, we can exceed 6 bits on some other architectures except 2068 * powerpc (e.g. 15 bits are available on x86_64, 13 bits are available 2069 * with the worst case of 64K pages on arm64) if we make sure the 2070 * exceeded bit is not applicable to powerpc. 
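 *
 * Worked example (illustrative, values depend on the architecture's
 * SECTION_SIZE_BITS): on x86_64, SECTION_SIZE_BITS is 27 (128 MiB
 * sections) and PAGE_SHIFT is 12, so PFN_SECTION_SHIFT is 27 - 12 = 15,
 * which is where the "15 bits are available on x86_64" above comes from.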
2071 */ 2072enum { 2073 SECTION_MARKED_PRESENT_BIT, 2074 SECTION_HAS_MEM_MAP_BIT, 2075 SECTION_IS_ONLINE_BIT, 2076 SECTION_IS_EARLY_BIT, 2077#ifdef CONFIG_ZONE_DEVICE 2078 SECTION_TAINT_ZONE_DEVICE_BIT, 2079#endif 2080#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT 2081 SECTION_IS_VMEMMAP_PREINIT_BIT, 2082#endif 2083 SECTION_MAP_LAST_BIT, 2084}; 2085 2086#define SECTION_MARKED_PRESENT BIT(SECTION_MARKED_PRESENT_BIT) 2087#define SECTION_HAS_MEM_MAP BIT(SECTION_HAS_MEM_MAP_BIT) 2088#define SECTION_IS_ONLINE BIT(SECTION_IS_ONLINE_BIT) 2089#define SECTION_IS_EARLY BIT(SECTION_IS_EARLY_BIT) 2090#ifdef CONFIG_ZONE_DEVICE 2091#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT) 2092#endif 2093#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT 2094#define SECTION_IS_VMEMMAP_PREINIT BIT(SECTION_IS_VMEMMAP_PREINIT_BIT) 2095#endif 2096#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1)) 2097#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT 2098 2099static inline struct page *__section_mem_map_addr(struct mem_section *section) 2100{ 2101 unsigned long map = section->section_mem_map; 2102 map &= SECTION_MAP_MASK; 2103 return (struct page *)map; 2104} 2105 2106static inline int present_section(const struct mem_section *section) 2107{ 2108 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); 2109} 2110 2111static inline int present_section_nr(unsigned long nr) 2112{ 2113 return present_section(__nr_to_section(nr)); 2114} 2115 2116static inline int valid_section(const struct mem_section *section) 2117{ 2118 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); 2119} 2120 2121static inline int early_section(const struct mem_section *section) 2122{ 2123 return (section && (section->section_mem_map & SECTION_IS_EARLY)); 2124} 2125 2126static inline int valid_section_nr(unsigned long nr) 2127{ 2128 return valid_section(__nr_to_section(nr)); 2129} 2130 2131static inline int online_section(const struct mem_section *section) 2132{ 2133 return (section && (section->section_mem_map & SECTION_IS_ONLINE)); 2134} 2135 2136#ifdef CONFIG_ZONE_DEVICE 2137static inline int online_device_section(const struct mem_section *section) 2138{ 2139 unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE; 2140 2141 return section && ((section->section_mem_map & flags) == flags); 2142} 2143#else 2144static inline int online_device_section(const struct mem_section *section) 2145{ 2146 return 0; 2147} 2148#endif 2149 2150#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT 2151static inline int preinited_vmemmap_section(const struct mem_section *section) 2152{ 2153 return (section && 2154 (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT)); 2155} 2156 2157void sparse_vmemmap_init_nid_early(int nid); 2158void sparse_vmemmap_init_nid_late(int nid); 2159 2160#else 2161static inline int preinited_vmemmap_section(const struct mem_section *section) 2162{ 2163 return 0; 2164} 2165static inline void sparse_vmemmap_init_nid_early(int nid) 2166{ 2167} 2168 2169static inline void sparse_vmemmap_init_nid_late(int nid) 2170{ 2171} 2172#endif 2173 2174static inline int online_section_nr(unsigned long nr) 2175{ 2176 return online_section(__nr_to_section(nr)); 2177} 2178 2179#ifdef CONFIG_MEMORY_HOTPLUG 2180void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn); 2181void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn); 2182#endif 2183 2184static inline struct mem_section *__pfn_to_section(unsigned long pfn) 2185{ 2186 return __nr_to_section(pfn_to_section_nr(pfn)); 2187} 
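/*
 * Example (illustrative sketch): mapping a pfn to its section and back.
 * __nr_to_section() may return NULL for an out-of-range or not yet
 * populated section, so callers must check the result.
 *
 *	unsigned long nr = pfn_to_section_nr(pfn);
 *	struct mem_section *ms = __nr_to_section(nr);
 *	unsigned long section_start = section_nr_to_pfn(nr);
 *
 *	if (ms && present_section(ms))
 *		... the section covering 'pfn' starts at 'section_start' ...
 */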
2188 2189extern unsigned long __highest_present_section_nr; 2190 2191static inline int subsection_map_index(unsigned long pfn) 2192{ 2193 return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION; 2194} 2195 2196#ifdef CONFIG_SPARSEMEM_VMEMMAP 2197static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) 2198{ 2199 int idx = subsection_map_index(pfn); 2200 struct mem_section_usage *usage = READ_ONCE(ms->usage); 2201 2202 return usage ? test_bit(idx, usage->subsection_map) : 0; 2203} 2204 2205static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn) 2206{ 2207 struct mem_section_usage *usage = READ_ONCE(ms->usage); 2208 int idx = subsection_map_index(*pfn); 2209 unsigned long bit; 2210 2211 if (!usage) 2212 return false; 2213 2214 if (test_bit(idx, usage->subsection_map)) 2215 return true; 2216 2217 /* Find the next subsection that exists */ 2218 bit = find_next_bit(usage->subsection_map, SUBSECTIONS_PER_SECTION, idx); 2219 if (bit == SUBSECTIONS_PER_SECTION) 2220 return false; 2221 2222 *pfn = (*pfn & PAGE_SECTION_MASK) + (bit * PAGES_PER_SUBSECTION); 2223 return true; 2224} 2225#else 2226static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) 2227{ 2228 return 1; 2229} 2230 2231static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn) 2232{ 2233 return true; 2234} 2235#endif 2236 2237void sparse_init_early_section(int nid, struct page *map, unsigned long pnum, 2238 unsigned long flags); 2239 2240#ifndef CONFIG_HAVE_ARCH_PFN_VALID 2241/** 2242 * pfn_valid - check if there is a valid memory map entry for a PFN 2243 * @pfn: the page frame number to check 2244 * 2245 * Check if there is a valid memory map entry aka struct page for the @pfn. 2246 * Note, that availability of the memory map entry does not imply that 2247 * there is actual usable memory at that @pfn. The struct page may 2248 * represent a hole or an unusable page frame. 2249 * 2250 * Return: 1 for PFNs that have memory map entries and 0 otherwise 2251 */ 2252static inline int pfn_valid(unsigned long pfn) 2253{ 2254 struct mem_section *ms; 2255 int ret; 2256 2257 /* 2258 * Ensure the upper PAGE_SHIFT bits are clear in the 2259 * pfn. Else it might lead to false positives when 2260 * some of the upper bits are set, but the lower bits 2261 * match a valid pfn. 2262 */ 2263 if (PHYS_PFN(PFN_PHYS(pfn)) != pfn) 2264 return 0; 2265 2266 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) 2267 return 0; 2268 ms = __pfn_to_section(pfn); 2269 rcu_read_lock_sched(); 2270 if (!valid_section(ms)) { 2271 rcu_read_unlock_sched(); 2272 return 0; 2273 } 2274 /* 2275 * Traditionally early sections always returned pfn_valid() for 2276 * the entire section-sized span. 2277 */ 2278 ret = early_section(ms) || pfn_section_valid(ms, pfn); 2279 rcu_read_unlock_sched(); 2280 2281 return ret; 2282} 2283 2284/* Returns end_pfn or higher if no valid PFN remaining in range */ 2285static inline unsigned long first_valid_pfn(unsigned long pfn, unsigned long end_pfn) 2286{ 2287 unsigned long nr = pfn_to_section_nr(pfn); 2288 2289 rcu_read_lock_sched(); 2290 2291 while (nr <= __highest_present_section_nr && pfn < end_pfn) { 2292 struct mem_section *ms = __pfn_to_section(pfn); 2293 2294 if (valid_section(ms) && 2295 (early_section(ms) || pfn_section_first_valid(ms, &pfn))) { 2296 rcu_read_unlock_sched(); 2297 return pfn; 2298 } 2299 2300 /* Nothing left in this section? 
Skip to next section */
2301 nr++;
2302 pfn = section_nr_to_pfn(nr);
2303 }
2304
2305 rcu_read_unlock_sched();
2306 return end_pfn;
2307}
2308
2309static inline unsigned long next_valid_pfn(unsigned long pfn, unsigned long end_pfn)
2310{
2311 pfn++;
2312
2313 if (pfn >= end_pfn)
2314 return end_pfn;
2315
2316 /*
2317 * Either every PFN within the section (or subsection for VMEMMAP) is
2318 * valid, or none of them are. So there's no point repeating the check
2319 * for every PFN; only call first_valid_pfn() again when crossing a
2320 * (sub)section boundary (i.e. !(pfn & ~PAGE_{SUB,}SECTION_MASK)).
2321 */
2322 if (pfn & ~(IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) ?
2323 PAGE_SUBSECTION_MASK : PAGE_SECTION_MASK))
2324 return pfn;
2325
2326 return first_valid_pfn(pfn, end_pfn);
2327}
2328
2329
2330#define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn) \
2331 for ((_pfn) = first_valid_pfn((_start_pfn), (_end_pfn)); \
2332 (_pfn) < (_end_pfn); \
2333 (_pfn) = next_valid_pfn((_pfn), (_end_pfn)))
2334
2335#endif
2336
2337static inline int pfn_in_present_section(unsigned long pfn)
2338{
2339 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
2340 return 0;
2341 return present_section(__pfn_to_section(pfn));
2342}
2343
2344static inline unsigned long next_present_section_nr(unsigned long section_nr)
2345{
2346 while (++section_nr <= __highest_present_section_nr) {
2347 if (present_section_nr(section_nr))
2348 return section_nr;
2349 }
2350
2351 return -1;
2352}
2353
2354#define for_each_present_section_nr(start, section_nr) \
2355 for (section_nr = next_present_section_nr(start - 1); \
2356 section_nr != -1; \
2357 section_nr = next_present_section_nr(section_nr))
2358
2359/*
2360 * These are _only_ used during initialisation, therefore they
2361 * can use __initdata ... They could have names to indicate
2362 * this restriction.
2363 */
2364#ifdef CONFIG_NUMA
2365#define pfn_to_nid(pfn) \
2366({ \
2367 unsigned long __pfn_to_nid_pfn = (pfn); \
2368 page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
2369})
2370#else
2371#define pfn_to_nid(pfn) (0)
2372#endif
2373
2374#else
2375#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
2376#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
2377#define pfn_in_present_section pfn_valid
2378#endif /* CONFIG_SPARSEMEM */
2379
2380/*
2381 * Fallback case for when the architecture provides its own pfn_valid() but
2382 * not a corresponding for_each_valid_pfn().
2383 */
2384#ifndef for_each_valid_pfn
2385#define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn) \
2386 for ((_pfn) = (_start_pfn); (_pfn) < (_end_pfn); (_pfn)++) \
2387 if (pfn_valid(_pfn))
2388#endif
2389
2390#endif /* !__GENERATING_BOUNDS_H */
2391#endif /* !__ASSEMBLY__ */
2392#endif /* _LINUX_MMZONE_H */