/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much,
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does not, in general, hinder anybody from dumping or swapping,
 * and it is no longer required for remap_pfn_range(). ioremap might require
 * it. Consequently, PG_reserved for a page mapped into user space can
 * indicate the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as backing storage. These are
 * usually PageAnon or shmem pages, but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it is first entered
 * into the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing it
 * is not safe since that may cause another machine check. Don't touch!
 */

/*
 * Don't use the pageflags directly. Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
        PG_locked,              /* Page is locked. Don't touch. */
        PG_writeback,           /* Page is under writeback */
        PG_referenced,
        PG_uptodate,
        PG_dirty,
        PG_lru,
        PG_head,                /* Must be in bit 6 */
        PG_waiters,             /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
        PG_active,
        PG_workingset,
        PG_owner_priv_1,        /* Owner use. If pagecache, fs may use */
        PG_owner_2,             /* Owner use. If pagecache, fs may use */
        PG_arch_1,
        PG_reserved,
        PG_private,             /* If pagecache, has fs-private data */
        PG_private_2,           /* If pagecache, has fs aux data */
        PG_reclaim,             /* To be reclaimed asap */
        PG_swapbacked,          /* Page is backed by RAM/swap */
        PG_unevictable,         /* Page is "unevictable" */
        PG_dropbehind,          /* drop pages on IO completion */
#ifdef CONFIG_MMU
        PG_mlocked,             /* Page is vma mlocked */
#endif
#ifdef CONFIG_MEMORY_FAILURE
        PG_hwpoison,            /* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
        PG_young,
        PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
        PG_arch_2,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
        PG_arch_3,
#endif
        __NR_PAGEFLAGS,

        PG_readahead = PG_reclaim,

        /* Anonymous memory (and shmem) */
        PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
        /* Some filesystems */
        PG_checked = PG_owner_priv_1,

        /*
         * Depending on the way an anonymous folio can be mapped into a page
         * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
         * THP), PG_anon_exclusive may be set only for the head page or for
         * tail pages of an anonymous folio. For now, we only expect it to be
         * set on tail pages for PTE-mapped THP.
         */
        PG_anon_exclusive = PG_owner_2,

        /*
         * Set if all buffer heads in the folio are mapped.
         * Filesystems which do not use BHs can use it for their own purpose.
         */
        PG_mappedtodisk = PG_owner_2,

        /*
         * Two page bits are conscripted by FS-Cache to maintain local caching
         * state. These bits are set on pages belonging to the netfs's inodes
         * when those inodes are being locally cached.
         */
        PG_fscache = PG_private_2,      /* page backed by cache */

        /* XEN */
        /* Pinned in Xen as a read-only pagetable page. */
        PG_pinned = PG_owner_priv_1,
        /* Pinned as part of domain save (see xen_mm_pin_all()). */
        PG_savepinned = PG_dirty,
        /* Has a grant mapping of another (foreign) domain's page. */
        PG_foreign = PG_owner_priv_1,
        /* Remapped by swiotlb-xen. */
        PG_xen_remapped = PG_owner_priv_1,

#ifdef CONFIG_MIGRATION
        /* movable_ops page that is isolated for migration */
        PG_movable_ops_isolated = PG_reclaim,
        /* this is a movable_ops page (for selected typed pages only) */
        PG_movable_ops = PG_uptodate,
#endif

        /* Only valid for buddy pages. Used to track pages that are reported */
        PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
        /* For self-hosted memmap pages */
        PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

        /*
         * Flags only valid for compound pages. Stored in first tail page's
         * flags word. Cannot use the first 8 flags or any flag marked as
         * PF_ANY.
         */

        /* At least one page in this folio has the hwpoison flag set */
        PG_has_hwpoisoned = PG_active,
        PG_large_rmappable = PG_workingset,     /* anon or file-backed */
        PG_partially_mapped = PG_reclaim,       /* was identified to be partially mapped */
};

#define PAGEFLAGS_MASK          ((1UL << NR_PAGEFLAGS) - 1)

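/*
 * Worked example (values are illustrative and configuration dependent):
 * if NR_PAGEFLAGS were 24, PAGEFLAGS_MASK would be (1UL << 24) - 1 ==
 * 0x00ffffff, i.e. the low 24 bits form the flags area and the remaining
 * high bits are left for the zone/node/section fields described above.
 */
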
#ifndef __GENERATING_BOUNDS_H

/*
 * For tail pages, if the size of struct page is a power of 2,
 * ->compound_info encodes the mask that converts the address of the tail
 * page to the address of the head page.
 *
 * Otherwise, ->compound_info holds a direct pointer to the head page.
 */
static __always_inline bool compound_info_has_mask(void)
{
        /*
         * Limit mask usage to HugeTLB vmemmap optimization (HVO) where it
         * makes a difference.
         *
         * The approach with the mask would work in a wider set of conditions,
         * but it requires validating that struct pages are naturally aligned
         * for all orders up to MAX_FOLIO_ORDER, which can be tricky.
         */
        if (!IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP))
                return false;

        return is_power_of_2(sizeof(struct page));
}

static __always_inline unsigned long _compound_head(const struct page *page)
{
        unsigned long info = READ_ONCE(page->compound_info);
        unsigned long mask;

        if (!compound_info_has_mask()) {
                /* Bit 0 encodes PageTail() */
                if (info & 1)
                        return info - 1;

                return (unsigned long)page;
        }

        /*
         * If compound_info_has_mask() is true, the rest of the info encodes
         * the mask that converts the address of the tail page to the head
         * page.
         *
         * No need to clear bit 0 in the mask as 'page' always has it clear.
         *
         * Let's do it in a branchless manner.
         */

        /* Non-tail: -1UL, Tail: 0 */
        mask = (info & 1) - 1;

        /* Non-tail: -1UL, Tail: info */
        mask |= info;

        return (unsigned long)page & mask;
}

#define compound_head(page)     ((typeof(page))_compound_head(page))

static __always_inline void set_compound_head(struct page *tail,
                const struct page *head, unsigned int order)
{
        unsigned int shift;
        unsigned long mask;

        if (!compound_info_has_mask()) {
                WRITE_ONCE(tail->compound_info, (unsigned long)head | 1);
                return;
        }

        /*
         * If the size of struct page is a power of 2, bits [shift:0] of the
         * virtual address of the compound head are zero.
         *
         * Calculate the mask that can be applied to the virtual address of
         * the tail page to get the address of the head page.
         */
        shift = order + order_base_2(sizeof(struct page));
        mask = GENMASK(BITS_PER_LONG - 1, shift);

        /* Bit 0 encodes PageTail() */
        WRITE_ONCE(tail->compound_info, mask | 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
        WRITE_ONCE(page->compound_info, 0);
}

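/*
 * Worked example (illustrative, assuming compound_info_has_mask() is
 * true): with sizeof(struct page) == 64 and an order-2 compound page,
 * set_compound_head() stores GENMASK(BITS_PER_LONG - 1, 8) | 1 in each
 * tail page (shift == 2 + order_base_2(64) == 8). _compound_head() then
 * sees (info & 1) == 1, so mask == 0 | info, and masking the tail page
 * address clears its low eight bits, yielding the naturally aligned head
 * page. For a non-tail page, (info & 1) - 1 == -1UL and the address is
 * returned unchanged.
 */
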
/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio. This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @page. If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)           (_Generic((p), \
        const struct page *: (const struct folio *)_compound_head(p), \
        struct page *: (struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio. This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)    (&(folio)->page + (n))

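/*
 * Illustrative usage sketch (not part of this header): code that holds a
 * struct page but wants to operate on folio state typically converts
 * first,
 *
 *      struct folio *folio = page_folio(page);
 *
 *      if (folio_test_dirty(folio))
 *              ...
 *
 * and, per the rules above, re-checks that the folio still contains the
 * page after gaining a reference if it did not already hold one.
 */
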
static __always_inline int PageTail(const struct page *page)
{
        return READ_ONCE(page->compound_info) & 1;
}

static __always_inline int PageCompound(const struct page *page)
{
        return test_bit(PG_head, &page->flags.f) ||
               READ_ONCE(page->compound_info) & 1;
}

#define PAGE_POISON_PATTERN     -1l
static inline int PagePoisoned(const struct page *page)
{
        return READ_ONCE(page->flags.f) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static const unsigned long *const_folio_flags(const struct folio *folio,
                unsigned n)
{
        const struct page *page = &folio->page;

        VM_BUG_ON_PGFLAGS(page->compound_info & 1, page);
        VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
        return &page[n].flags.f;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
        struct page *page = &folio->page;

        VM_BUG_ON_PGFLAGS(page->compound_info & 1, page);
        VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
        return &page[n].flags.f;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *      check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *      the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *      for compound pages, all operations related to the page flag are
 *      applied to the head page.
 *
 * PF_NO_TAIL:
 *      modifications of the page flag must be done on small or head pages,
 *      checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *      the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *      the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({ \
        VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \
        page; })
#define PF_ANY(page, enforce)   PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)  PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({ \
        VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
        PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({ \
        VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
        PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({ \
        VM_BUG_ON_PGFLAGS(!PageHead(page), page); \
        PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY            0
#define FOLIO_PF_HEAD           0
#define FOLIO_PF_NO_TAIL        0
#define FOLIO_PF_NO_COMPOUND    0
#define FOLIO_PF_SECOND         1

#define FOLIO_HEAD_PAGE         0
#define FOLIO_SECOND_PAGE       1

/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page) \
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page) \
static __always_inline void folio_set_##name(struct folio *folio) \
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page) \
static __always_inline void folio_clear_##name(struct folio *folio) \
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page) \
static __always_inline void __folio_set_##name(struct folio *folio) \
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page) \
static __always_inline void __folio_clear_##name(struct folio *folio) \
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page) \
static __always_inline bool folio_test_set_##name(struct folio *folio) \
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page) \
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page) \
FOLIO_TEST_FLAG(name, page) \
FOLIO_SET_FLAG(name, page) \
FOLIO_CLEAR_FLAG(name, page)

#define TESTPAGEFLAG(uname, lname, policy) \
FOLIO_TEST_FLAG(lname, FOLIO_##policy) \
static __always_inline int Page##uname(const struct page *page) \
{ return test_bit(PG_##lname, &policy(page, 0)->flags.f); }

#define SETPAGEFLAG(uname, lname, policy) \
FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void SetPage##uname(struct page *page) \
{ set_bit(PG_##lname, &policy(page, 1)->flags.f); }

#define CLEARPAGEFLAG(uname, lname, policy) \
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void ClearPage##uname(struct page *page) \
{ clear_bit(PG_##lname, &policy(page, 1)->flags.f); }

#define __SETPAGEFLAG(uname, lname, policy) \
__FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void __SetPage##uname(struct page *page) \
{ __set_bit(PG_##lname, &policy(page, 1)->flags.f); }

#define __CLEARPAGEFLAG(uname, lname, policy) \
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void __ClearPage##uname(struct page *page) \
{ __clear_bit(PG_##lname, &policy(page, 1)->flags.f); }

#define TESTSETFLAG(uname, lname, policy) \
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestSetPage##uname(struct page *page) \
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags.f); }

#define TESTCLEARFLAG(uname, lname, policy) \
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestClearPage##uname(struct page *page) \
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags.f); }

#define PAGEFLAG(uname, lname, policy) \
        TESTPAGEFLAG(uname, lname, policy) \
        SETPAGEFLAG(uname, lname, policy) \
        CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy) \
        TESTPAGEFLAG(uname, lname, policy) \
        __SETPAGEFLAG(uname, lname, policy) \
        __CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy) \
        TESTSETFLAG(uname, lname, policy) \
        TESTCLEARFLAG(uname, lname, policy)

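/*
 * For example (illustrative expansion, slightly simplified),
 * PAGEFLAG(Dirty, dirty, PF_HEAD) below generates:
 *
 *      bool folio_test_dirty(const struct folio *folio);
 *      void folio_set_dirty(struct folio *folio);
 *      void folio_clear_dirty(struct folio *folio);
 *      int PageDirty(const struct page *page);
 *      void SetPageDirty(struct page *page);
 *      void ClearPageDirty(struct page *page);
 *
 * where the Page, SetPage and ClearPage forms apply the PF_HEAD policy,
 * i.e. they operate on compound_head(page).
 */
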
#define FOLIO_TEST_FLAG_FALSE(name) \
static inline bool folio_test_##name(const struct folio *folio) \
{ return false; }
#define FOLIO_SET_FLAG_NOOP(name) \
static inline void folio_set_##name(struct folio *folio) { }
#define FOLIO_CLEAR_FLAG_NOOP(name) \
static inline void folio_clear_##name(struct folio *folio) { }
#define __FOLIO_SET_FLAG_NOOP(name) \
static inline void __folio_set_##name(struct folio *folio) { }
#define __FOLIO_CLEAR_FLAG_NOOP(name) \
static inline void __folio_clear_##name(struct folio *folio) { }
#define FOLIO_TEST_SET_FLAG_FALSE(name) \
static inline bool folio_test_set_##name(struct folio *folio) \
{ return false; }
#define FOLIO_TEST_CLEAR_FLAG_FALSE(name) \
static inline bool folio_test_clear_##name(struct folio *folio) \
{ return false; }

#define FOLIO_FLAG_FALSE(name) \
FOLIO_TEST_FLAG_FALSE(name) \
FOLIO_SET_FLAG_NOOP(name) \
FOLIO_CLEAR_FLAG_NOOP(name)

#define TESTPAGEFLAG_FALSE(uname, lname) \
FOLIO_TEST_FLAG_FALSE(lname) \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname) \
FOLIO_SET_FLAG_NOOP(lname) \
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname) \
FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname) \
__FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname) \
FOLIO_TEST_SET_FLAG_FALSE(lname) \
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname) \
FOLIO_TEST_CLEAR_FLAG_FALSE(lname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
        SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname) \
        TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

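/*
 * Illustrative note: flags that only exist under a CONFIG option are
 * instantiated with the real macros in one branch and with the
 * _FALSE/_NOOP stubs in the other (see the CONFIG_MMU handling of
 * PG_mlocked below). Callers can therefore write
 *
 *      if (folio_test_mlocked(folio))
 *              ...
 *
 * unconditionally; without CONFIG_MMU the test evaluates to false at
 * compile time and the dead branch is optimised away.
 */
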
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
        FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
        __FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
        __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
        TESTCLEARFLAG(LRU, lru, PF_HEAD)
FOLIO_FLAG(active, FOLIO_HEAD_PAGE)
        __FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
        FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
PAGEFLAG(Workingset, workingset, PF_HEAD)
        TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)      /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
        TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
        TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
        __FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE)
        __FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
FOLIO_FLAG(private_2, FOLIO_HEAD_PAGE)

/* owner_2 can be set on tail pages for anon memory */
FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
        TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
        TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
        FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)

FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
        FOLIO_TEST_CLEAR_FLAG(dropbehind, FOLIO_HEAD_PAGE)
        __FOLIO_SET_FLAG(dropbehind, FOLIO_HEAD_PAGE)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f) is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif
#define PhysHighMem(__p) (PageHighMem(phys_to_page(__p)))

/* Does kmap_local_folio() only allow access to one page of the folio? */
#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
#define folio_test_partial_kmap(f)      true
#else
#define folio_test_partial_kmap(f)      folio_test_highmem(f)
#endif

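/*
 * Illustrative sketch (simplified from the folio copy helpers in
 * linux/highmem.h): when folio_test_partial_kmap() is true, a
 * kmap_local_folio() mapping only covers one page, so copy loops clamp
 * each chunk to the current page, roughly
 *
 *      if (folio_test_partial_kmap(folio))
 *              chunk = min(chunk, PAGE_SIZE - offset_in_page(offset));
 *
 * and remap for every page instead of addressing the folio linearly.
 */
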
#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
        return folio_test_swapbacked(folio) &&
                test_bit(PG_swapcache, const_folio_flags(folio, 0));
}

FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(swapcache)
#endif

FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
        __FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
        FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)

#ifdef CONFIG_MMU
FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE)
        __FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
        FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
        FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(mlocked)
        __FOLIO_CLEAR_FLAG_NOOP(mlocked)
        FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked)
        FOLIO_TEST_SET_FLAG_FALSE(mlocked)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#ifdef CONFIG_PAGE_IDLE_FLAG
#ifdef CONFIG_64BIT
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif
/* See page_idle.h for !64BIT workaround */
#else /* !CONFIG_PAGE_IDLE_FLAG */
FOLIO_FLAG_FALSE(young)
FOLIO_TEST_CLEAR_FLAG_FALSE(young)
FOLIO_FLAG_FALSE(idle)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous folio mapped into a user virtual memory area,
 * folio->mapping points to its anon_vma, not to a struct address_space;
 * with the FOLIO_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous folio in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the FOLIO_MAPPING_ANON_KSM bit may be set along with the FOLIO_MAPPING_ANON
 * bit; and then folio->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged folio. See ksm.h.
 *
 * Please note that, confusingly, "folio_mapping" refers to the inode
 * address_space which maps the folio from disk; whereas "folio_mapped"
 * refers to the user virtual address space into which the folio is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal states, folio->mapping does not exist as such, nor do the
 * flags below. So in order to avoid testing non-existent bits, please
 * make sure that folio_test_slab(folio) actually evaluates to false
 * before calling the following functions (e.g., folio_test_anon).
 * See mm/slab.h.
 */
#define FOLIO_MAPPING_ANON      0x1
#define FOLIO_MAPPING_ANON_KSM  0x2
#define FOLIO_MAPPING_KSM       (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
#define FOLIO_MAPPING_FLAGS     (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)

static __always_inline bool folio_test_anon(const struct folio *folio)
{
        return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0;
}

static __always_inline bool folio_test_lazyfree(const struct folio *folio)
{
        return folio_test_anon(folio) && !folio_test_swapbacked(folio);
}

static __always_inline bool PageAnonNotKsm(const struct page *page)
{
        unsigned long flags = (unsigned long)page_folio(page)->mapping;

        return (flags & FOLIO_MAPPING_FLAGS) == FOLIO_MAPPING_ANON;
}

static __always_inline bool PageAnon(const struct page *page)
{
        return folio_test_anon(page_folio(page));
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
        return ((unsigned long)folio->mapping & FOLIO_MAPPING_FLAGS) ==
                FOLIO_MAPPING_KSM;
}
#else
FOLIO_TEST_FLAG_FALSE(ksm)
#endif

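/*
 * Worked example (illustrative addresses): for an anonymous folio whose
 * anon_vma lives at 0xffff888012345600, folio->mapping holds
 * 0xffff888012345601 (FOLIO_MAPPING_ANON set); if KSM has merged the
 * folio, the two low bits are 0x3 (FOLIO_MAPPING_KSM) and the pointer
 * refers to KSM's private structure instead. The tagging works because
 * the pointed-to structures are at least 4-byte aligned, leaving the two
 * low bits of the pointer free.
 */
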
u64 stable_page_flags(const struct page *page);

/**
 * folio_xor_flags_has_waiters - Change some folio flags.
 * @folio: The folio.
 * @mask: Bits set in this word will be changed.
 *
 * This must only be used for flags which are changed with the folio
 * lock held. For example, it is unsafe to use for PG_dirty as that
 * can be set without the folio lock held. It can also only be used
 * on flags which are in the range 0-6 as some of the implementations
 * only affect those bits.
 *
 * Return: Whether there are tasks waiting on the folio.
 */
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
                unsigned long mask)
{
        return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage. Anonymous
 * and CoW folios are always uptodate. If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(const struct folio *folio)
{
        bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));

        /*
         * Must ensure that the data we read out of the folio is loaded
         * _after_ we've loaded folio->flags to check the uptodate bit.
         * We can skip the barrier if the folio is not uptodate, because
         * we wouldn't be reading anything from it.
         *
         * See folio_mark_uptodate() for the other side of the story.
         */
        if (ret)
                smp_rmb();

        return ret;
}

static inline bool PageUptodate(const struct page *page)
{
        return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
        smp_wmb();
        __set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
        /*
         * Memory barrier must be issued before setting the PG_uptodate bit,
         * so that all previous stores issued in order to bring the folio
         * uptodate are actually visible before folio_test_uptodate becomes
         * true.
         */
        smp_wmb();
        set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
        __folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
        folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

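/*
 * Illustrative sketch of the barrier pairing above: a read-side path
 * fills the folio before publishing it, and a consumer only dereferences
 * the data after seeing the flag:
 *
 *      // producer (e.g. after read I/O completes)
 *      copy data into the folio;
 *      folio_mark_uptodate(folio);     // smp_wmb(), then set_bit()
 *
 *      // consumer
 *      if (folio_test_uptodate(folio)) // test_bit(), then smp_rmb()
 *              read data from the folio;
 *
 * The smp_wmb()/smp_rmb() pair guarantees the consumer never observes
 * the flag set without also observing the data that made it true.
 */
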
void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);

#define folio_start_writeback(folio) \
        __folio_start_writeback(folio, false)

static __always_inline bool folio_test_head(const struct folio *folio)
{
        return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(const struct page *page)
{
        PF_POISONED_CHECK(page);
        return test_bit(PG_head, &page->flags.f);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
        return folio_test_head(folio);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
        BUG_ON(!PageHead(page));
        ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
FOLIO_FLAG_FALSE(partially_mapped)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(const struct page *page)
{
        return PageCompound(page);
}
#else
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in
 * the compound page.
 *
 * This flag is set by the hwpoison handler and cleared by THP split or
 * when the page is freed.
 */
FOLIO_FLAG(has_hwpoisoned, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(has_hwpoisoned)
#endif

/*
 * For pages that do not use mapcount, page_type may be used.
 * The low 24 bits of pagetype may be used for your own purposes, as long
 * as you are careful to not affect the top 8 bits. The low bits of
 * pagetype will be overwritten when you clear the page_type from the page.
 */
enum pagetype {
        /* 0x00-0x7f are positive numbers, ie mapcount */
        /* Reserve 0x80-0xef for mapcount overflow. */
        PGTY_buddy = 0xf0,
        PGTY_offline = 0xf1,
        PGTY_table = 0xf2,
        PGTY_guard = 0xf3,
        PGTY_hugetlb = 0xf4,
        PGTY_slab = 0xf5,
        PGTY_zsmalloc = 0xf6,
        PGTY_unaccepted = 0xf7,
        PGTY_large_kmalloc = 0xf8,
        PGTY_netpp = 0xf9,

        PGTY_mapcount_underflow = 0xff
};

static inline bool page_type_has_type(int page_type)
{
        return page_type < (PGTY_mapcount_underflow << 24);
}

/* This takes a mapcount which is one more than page->_mapcount */
static inline bool page_mapcount_is_type(unsigned int mapcount)
{
        return page_type_has_type(mapcount - 1);
}

static inline bool page_has_type(const struct page *page)
{
        return page_type_has_type(data_race(page->page_type));
}

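/*
 * Worked example (illustrative): __folio_set_slab() below stores
 * (unsigned int)PGTY_slab << 24 == 0xf5000000 in page_type. Compared as
 * a signed int, 0xf5000000 is less than
 * PGTY_mapcount_underflow << 24 == 0xff000000, so page_type_has_type()
 * returns true. An untyped page has page_type == UINT_MAX, which as a
 * signed int is -1 and fails the comparison.
 */
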
#define FOLIO_TYPE_OPS(lname, fname) \
static __always_inline bool folio_test_##fname(const struct folio *folio) \
{ \
        return data_race(folio->page.page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __folio_set_##fname(struct folio *folio) \
{ \
        if (folio_test_##fname(folio)) \
                return; \
        VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
                        folio); \
        folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __folio_clear_##fname(struct folio *folio) \
{ \
        if (folio->page.page_type == UINT_MAX) \
                return; \
        VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
        folio->page.page_type = UINT_MAX; \
}

#define PAGE_TYPE_OPS(uname, lname, fname) \
FOLIO_TYPE_OPS(lname, fname) \
static __always_inline int Page##uname(const struct page *page) \
{ \
        return data_race(page->page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
        if (Page##uname(page)) \
                return; \
        VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
        page->page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
        if (page->page_type == UINT_MAX) \
                return; \
        VM_BUG_ON_PAGE(!Page##uname(page), page); \
        page->page_type = UINT_MAX; \
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * When a memory block gets onlined, all pages are initialized with a
 * refcount of 1 and PageOffline(). generic_online_page() will
 * take care of clearing PageOffline().
 *
 * If a driver wants to allow offlining unmovable PageOffline() pages without
 * putting them back to the buddy, it can do so via the memory notifier by
 * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
 * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
 * pages (now with a reference count of zero) are treated like free (unmanaged)
 * pages, allowing the containing memory block to get offlined. A driver that
 * relies on this feature is aware that re-onlining the memory block will
 * require not giving them to the buddy via generic_online_page().
 *
 * Memory offlining code will not adjust the managed page count for any
 * PageOffline() pages, treating them like they were never exposed to the
 * buddy using generic_online_page().
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

PAGE_TYPE_OPS(Slab, slab, slab)

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)

/*
 * Marks pages that have to be accepted before they can be touched for the
 * first time.
 *
 * Serialized with the zone lock.
 */
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)

/*
 * Marks page_pool allocated pages.
 */
PAGE_TYPE_OPS(Netpp, netpp, netpp)

/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
        return folio_test_hugetlb(page_folio(page));
}

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(const struct page *page)
{
        const struct folio *folio;

        if (PageHWPoison(page))
                return true;
        folio = page_folio(page);
        return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}

static inline bool folio_contain_hwpoisoned_page(struct folio *folio)
{
        return folio_test_hwpoison(folio) ||
               (folio_test_large(folio) && folio_test_has_hwpoisoned(folio));
}

bool is_free_buddy_page(const struct page *page);

#ifdef CONFIG_MIGRATION
/*
 * This page is migratable through movable_ops (for selected typed pages
 * only).
 *
 * Page migration of such pages might fail, for example, if the page is
 * already isolated by somebody else, or if the page is about to get freed.
 *
 * While a subsystem might set selected typed pages that support page
 * migration as being movable through movable_ops, it must never clear
 * this flag.
 *
 * This flag is only cleared when the page is freed back to the buddy.
 *
 * Only selected page types support this flag (see page_movable_ops()) and
 * the flag might be used in other contexts for other pages. Always use
 * page_has_movable_ops() instead.
 */
TESTPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
SETPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
/*
 * A movable_ops page has this flag set while it is isolated for migration.
 * This flag primarily protects against concurrent migration attempts.
 *
 * Once migration ended (success or failure), the flag is cleared. The
 * flag is managed by the migration core.
 */
PAGEFLAG(MovableOpsIsolated, movable_ops_isolated, PF_NO_TAIL);
#else /* !CONFIG_MIGRATION */
TESTPAGEFLAG_FALSE(MovableOps, movable_ops);
SETPAGEFLAG_NOOP(MovableOps, movable_ops);
PAGEFLAG_FALSE(MovableOpsIsolated, movable_ops_isolated);
#endif /* CONFIG_MIGRATION */

/**
 * page_has_movable_ops - test for a movable_ops page
 * @page: The page to test.
 *
 * Test whether this is a movable_ops page. Such pages will stay that
 * way until freed.
 *
 * Returns true if this is a movable_ops page, otherwise false.
 */
static inline bool page_has_movable_ops(const struct page *page)
{
        return PageMovableOps(page) &&
               (PageOffline(page) || PageZsmalloc(page));
}

static __always_inline int PageAnonExclusive(const struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnon(page), page);

        /*
         * HugeTLB stores this information on the head page; THP keeps it
         * per page.
         */
        if (PageHuge(page))
                page = compound_head(page);
        return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
        VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
        set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
        VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
        clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
        VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
        VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
        __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED            (1UL << PG_mlocked)
#else
#define __PG_MLOCKED            0
#endif

/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE \
        (1UL << PG_lru | 1UL << PG_locked | \
         1UL << PG_private | 1UL << PG_private_2 | \
         1UL << PG_writeback | 1UL << PG_reserved | \
         1UL << PG_active | \
         1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP \
        ((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page. They may overlap
 * the CHECK_AT_FREE flags above, so they need to be cleared.
 */
#define PAGE_FLAGS_SECOND \
        (0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
         1UL << PG_large_rmappable | 1UL << PG_partially_mapped)

#define PAGE_FLAGS_PRIVATE \
        (1UL << PG_private | 1UL << PG_private_2)

/**
 * folio_has_private - Determine if folio has private stuff
 * @folio: The folio to be checked
 *
 * Determine if a folio has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int folio_has_private(const struct folio *folio)
{
        return !!(folio->flags.f & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */