linux/hugetlb.h — Linux kernel mirror, git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at commit ee9dce44362b2d8132c32964656ab6dff7dfbc6a (1384 lines, 38 kB)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>
#include <linux/nodemask.h>

struct mmu_gather;
struct node;

void free_huge_folio(struct folio *folio);

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/pagemap.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page. But
 * the head struct page cannot meet our needs, so we have to abuse other tail
 * struct pages to store the metadata.
 */
#define __NR_USED_SUBPAGE 3

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
	struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
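/*
 * Illustrative example (the numbers are invented for this sketch, only the
 * field names come from struct file_region above): a reservation covering
 * huge page indices 2..5 of a mapping is the half-open interval [2, 6):
 *
 *	struct file_region rg = { .from = 2, .to = 6 };
 *	long nr_hpages = rg.to - rg.from;	// 4 huge pages in the region
 */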

struct hugetlb_vma_lock {
	struct kref refs;
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long start, unsigned long end,
			  struct folio *, zap_flags_t);
void __unmap_hugepage_range(struct mmu_gather *tlb,
			    struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct folio *, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     uffd_flags_t flags,
			     struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
long hugetlb_reserve_pages(struct inode *inode, long from, long to,
			   struct vm_area_desc *desc, vma_flags_t vma_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
			       bool *migratable_cleared);
void folio_putback_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);
bool hugetlbfs_pagecache_present(struct hstate *h,
				 struct vm_area_struct *vma,
				 unsigned long address);

struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);

extern int movable_gigantic_pages __read_mostly;
extern int sysctl_hugetlb_shm_group __read_mostly;
extern struct list_head huge_boot_pages[MAX_NUMNODES];

void hugetlb_bootmem_alloc(void);
extern nodemask_t hugetlb_bootmem_nodes;
void hugetlb_bootmem_set_nodes(void);

/* arch callbacks */

#ifndef CONFIG_HIGHPTE
/*
 * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
 * which may go down to the lowest PTE level in their huge_pte_offset() and
 * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
 */
static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long address)
{
	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
}
#endif

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz);
/*
 * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
 * Returns the pte_t* if found, or NULL if the address is not mapped.
 *
 * IMPORTANT: we should normally not call this function directly; it is
 * only a common interface used to implement the arch-specific walkers.
 * Please use hugetlb_walk() instead, because that will attempt to
 * verify the locking for you.
 *
 * Since this function will walk all the pgtable pages (including not only
 * high-level pgtable page, but also PUD entry that can be unshared
 * concurrently for VM_SHARED), the caller of this function should be
 * responsible for its thread safety. One can follow this rule:
 *
 * (1) For private mappings: pmd unsharing is not possible, so holding the
 *     mmap_lock for either read or write is sufficient. Most callers
 *     already hold the mmap_lock, so normally, no special action is
 *     required.
 *
 * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
 *     pgtable page can go away from under us! It can be done by a pmd
 *     unshare with a follow up munmap() on the other process), then we
 *     need either:
 *
 *     (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
 *           won't happen upon the range (it also makes sure the pte_t we
 *           read is the right and stable one), or,
 *
 *     (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
 *           sure even if unshare happened the racy unmap() will wait until
 *           i_mmap_rwsem is released.
 *
 * Option (2.1) is the safest, which guarantees pte stability from the pmd
 * sharing pov, until the vma lock is released. Option (2.2) doesn't protect
 * against a concurrent pmd unshare, but it makes sure the pgtable page is
 * safe to access.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
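/*
 * Illustrative sketch of rule (2.1) above, using the lock helpers declared
 * later in this header. The vma and addr names are hypothetical locals; the
 * point is that the hugetlb VMA lock stays held across the walk and the pte
 * access:
 *
 *	struct hstate *h = hstate_vma(vma);
 *	pte_t *ptep;
 *
 *	hugetlb_vma_lock_read(vma);
 *	ptep = hugetlb_walk(vma, addr & huge_page_mask(h), huge_page_size(h));
 *	if (ptep) {
 *		// examine or lock the pte, e.g. via huge_pte_lock()
 *	}
 *	hugetlb_vma_unlock_read(vma);
 */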
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
		     unsigned long addr, pte_t *ptep);
void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
					  unsigned long *start, unsigned long *end);

extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
				unsigned long *begin, unsigned long *end);
extern void __hugetlb_zap_end(struct vm_area_struct *vma,
			      struct zap_details *details);

static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
				     unsigned long *start, unsigned long *end)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_begin(vma, start, end);
}

static inline void hugetlb_zap_end(struct vm_area_struct *vma,
				   struct zap_details *details)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_end(vma, details);
}

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);
long hugetlb_change_protection(struct vm_area_struct *vma,
			       unsigned long address, unsigned long end, pgprot_t newprot,
			       unsigned long cp_flags);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
void fixup_hugetlb_reservations(struct vm_area_struct *vma);
void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
int hugetlb_vma_lock_alloc(struct vm_area_struct *vma);

unsigned int arch_hugetlb_cma_order(void);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_folio_mapping_lock_write(
							struct folio *folio)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mmu_gather *tlb,
				   struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void huge_pmd_unshare_flush(struct mmu_gather *tlb,
					  struct vm_area_struct *vma)
{
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_begin(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_end(
				struct vm_area_struct *vma,
				struct zap_details *details)
{
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr, unsigned long len)
{
	return 0;
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
					   struct vm_area_struct *dst_vma,
					   unsigned long dst_addr,
					   unsigned long src_addr,
					   uffd_flags_t flags,
					   struct folio **foliop)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
				     unsigned long sz)
{
	return NULL;
}

static inline bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					     bool *migratable_cleared)
{
	return 0;
}

static inline void folio_putback_hugetlb(struct folio *folio)
{
}

static inline void move_hugetlb_state(struct folio *old_folio,
				      struct folio *new_folio, int reason)
{
}

static inline long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
					  struct vm_area_struct *vma, unsigned long start,
					  unsigned long end, struct folio *folio,
					  zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
				       struct vm_area_struct *vma, unsigned long address,
				       unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
}

static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {}

static inline int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long max_inodes;	/* inodes allowed */
	long free_inodes;	/* inodes free */
	spinlock_t stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct inode vfs_inode;
	struct resv_map *resv_map;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vma_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(const struct file *file)
{
	return file->f_op->fop_flags & FOP_HUGE_PAGES;
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vma_flags_t acctflag,
		   int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time. Cleared when page is fully instantiated. Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization: Examined or modified by code that knows it has
 *	the only reference to page. i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables. Indicates the page is a candidate for
 *	migration.
 *	Synchronization: Initially set after new page allocation with no
 *	locking. When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator. Typically used for migration target pages when no pages
 *	are available in the pool. The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has only reference. All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	HPG_cma,
	__NR_HPAGEFLAGS,
};
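/*
 * Illustrative note: each flag above gets per-folio accessors generated by
 * the HPAGEFLAG() macros defined below. For HPG_temporary, for example, the
 * generated helpers are used roughly like this (folio is a hypothetical
 * local):
 *
 *	if (folio_test_hugetlb(folio) && folio_test_hugetlb_temporary(folio))
 *		folio_clear_hugetlb_temporary(folio);
 *	else
 *		folio_set_hugetlb_temporary(folio);
 *
 * The bits live in folio->private of the hugetlb head page, as noted above.
 */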

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)			\
static __always_inline					\
bool folio_test_hugetlb_##flname(struct folio *folio)	\
	{	void *private = &folio->private;	\
		return test_bit(HPG_##flname, private);	\
	}

#define SETHPAGEFLAG(uname, flname)			\
static __always_inline					\
void folio_set_hugetlb_##flname(struct folio *folio)	\
	{	void *private = &folio->private;	\
		set_bit(HPG_##flname, private);		\
	}

#define CLEARHPAGEFLAG(uname, flname)			\
static __always_inline					\
void folio_clear_hugetlb_##flname(struct folio *folio)	\
	{	void *private = &folio->private;	\
		clear_bit(HPG_##flname, private);	\
	}
#else
#define TESTHPAGEFLAG(uname, flname)			\
static inline bool					\
folio_test_hugetlb_##flname(struct folio *folio)	\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)			\
static inline void					\
folio_set_hugetlb_##flname(struct folio *folio)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)			\
static inline void					\
folio_clear_hugetlb_##flname(struct folio *folio)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)			\
	TESTHPAGEFLAG(uname, flname)			\
	SETHPAGEFLAG(uname, flname)			\
	CLEARHPAGEFLAG(uname, flname)			\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
HPAGEFLAG(Cma, cma)

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	struct lock_class_key resize_key;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
	char name[HSTATE_NAME_LEN];
};

struct cma;

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
	unsigned long flags;
	struct cma *cma;
};

#define HUGE_BOOTMEM_HVO		0x0001
#define HUGE_BOOTMEM_ZONES_VALID	0x0002
#define HUGE_BOOTMEM_CMA		0x0004

bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m);

int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list);
int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
void wait_for_freed_hugetlb_folios(void);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				  unsigned long addr, bool cow_from_owner);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
					   nodemask_t *nmask, gfp_t gfp_mask,
					   bool allow_alloc_fallback);
struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
					  nodemask_t *nmask, gfp_t gfp_mask);

int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			      pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
			      unsigned long address, struct folio *folio);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return folio->_hugetlb_subpool;
}

static inline void hugetlb_set_folio_subpool(struct folio *folio,
					     struct hugepage_subpool *subpool)
{
	folio->_hugetlb_subpool = subpool;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	if (page_size_log < BITS_PER_LONG)
		return size_to_hstate(1UL << page_size_log);

	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

/**
 * hugetlb_linear_page_index() - linear_page_index() but in hugetlb
 * page size granularity.
 * @vma: the hugetlb VMA
 * @address: the virtual address within the VMA
 *
 * Return: the page offset within the mapping in huge page units.
 */
static inline pgoff_t hugetlb_linear_page_index(struct vm_area_struct *vma,
						unsigned long address)
{
	struct hstate *h = hstate_vma(vma);

	return ((address - vma->vm_start) >> huge_page_shift(h)) +
		(vma->vm_pgoff >> huge_page_order(h));
}
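/*
 * Worked example (hypothetical numbers): for a 2 MiB hstate, i.e.
 * huge_page_shift() == 21 and huge_page_order() == 9 with 4 KiB base pages,
 * an address 5 MiB past vma->vm_start in a VMA whose vm_pgoff is 0 gives
 *
 *	(5 MiB >> 21) + (0 >> 9) == 2
 *
 * so the address falls in the third huge page of the mapping.
 */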

static inline bool order_is_gigantic(unsigned int order)
{
	return order > MAX_PAGE_ORDER;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return order_is_gigantic(huge_page_order(h));
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
				struct address_space *mapping, pgoff_t idx)
{
	return filemap_lock_folio(mapping, idx << huge_page_order(h));
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugetlb_flags
static inline void arch_clear_hugetlb_flags(struct folio *folio) { }
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

#ifndef arch_has_huge_bootmem_alloc
/*
 * Some architectures do their own bootmem allocation, so they can't use
 * early CMA allocation.
 */
static inline bool arch_has_huge_bootmem_alloc(void)
{
	return false;
}
#endif

static inline struct hstate *folio_hstate(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	return size_to_hstate(folio_size(folio));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

int dissolve_free_hugetlb_folio(struct folio *folio);
int dissolve_free_hugetlb_folios(unsigned long start_pfn,
				 unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
#else
static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
	    (huge_page_shift(h) == PUD_SHIFT) ||
	    (huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in a
 * movable zone. Movability of any huge page needs to be considered
 * only if the huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in a movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable, they should not be movable because it's not
 * feasible to migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h) && !movable_gigantic_pages)
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	gfp_t gfp = __GFP_COMP | __GFP_NOWARN;

	gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;

	return gfp;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline bool htlb_allow_alloc_fallback(int reason)
{
	bool allowed_fallback = false;

	/*
	 * Note: the memory offline, memory failure and migration syscalls will
	 * be allowed to fall back to other nodes due to lack of a better choice,
	 * which might break the per-node hugetlb pool. The other cases will
	 * set __GFP_THISNODE to avoid breaking the per-node hugetlb pool.
	 */
	switch (reason) {
	case MR_MEMORY_HOTPLUG:
	case MR_MEMORY_FAILURE:
	case MR_SYSCALL:
	case MR_MEMPOLICY_MBIND:
		allowed_fallback = true;
		break;
	default:
		break;
	}

	return allowed_fallback;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	const unsigned long size = huge_page_size(h);

	VM_WARN_ON(size == PAGE_SIZE);

	/*
	 * hugetlb must use the exact same PT locks as core-mm page table
	 * walkers would. When modifying a PTE table, hugetlb must take the
	 * PTE PT lock, when modifying a PMD table, hugetlb must take the PMD
	 * PT lock etc.
	 *
	 * The expectation is that any hugetlb folio smaller than a PMD is
	 * always mapped into a single PTE table and that any hugetlb folio
	 * smaller than a PUD (but at least as big as a PMD) is always mapped
	 * into a single PMD table.
	 *
	 * If that does not hold for an architecture, then that architecture
	 * must disable split PT locks such that all *_lockptr() functions
	 * will give us the same result: the per-MM PT lock.
	 *
	 * Note that with e.g., CONFIG_PGTABLE_LEVELS=2 where
	 * PGDIR_SIZE==P4D_SIZE==PUD_SIZE==PMD_SIZE, we'd use pud_lockptr()
	 * and core-mm would use pmd_lockptr(). However, in such configurations
	 * split PMD locks are disabled -- they don't make sense on a single
	 * PGDIR page table -- and the end result is the same.
	 */
	if (size >= PUD_SIZE)
		return pud_lockptr(mm, (pud_t *) pte);
	else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE))
		return pmd_lockptr(mm, (pmd_t *) pte);
	/* pte_alloc_huge() only applies with !CONFIG_HIGHPTE */
	return ptep_lockptr(mm, pte);
}
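/*
 * Illustrative sketch of the usual calling pattern (h, mm and ptep are
 * hypothetical locals): the lock selected above is normally taken through
 * the huge_pte_lock() wrapper defined near the end of this header.
 *
 *	spinlock_t *ptl;
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	// the huge PTE at ptep can now be examined or updated safely
 *	spin_unlock(ptl);
 */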

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif
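/*
 * Illustrative sketch of the start/commit pairing expected by the two
 * helpers above, roughly as a protection-change path would use them.
 * vma, addr, ptep and newprot are hypothetical locals, and huge_pte_modify()
 * is assumed to come from the asm-generic hugetlb helpers:
 *
 *	pte_t old_pte, pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */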

#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

/*
 * Check if a given raw @page in a hugepage is HWPOISON.
 */
bool is_raw_hwpoison_page_in_hugepage(struct page *page);

static inline unsigned long huge_page_mask_align(struct file *file)
{
	return PAGE_MASK & ~huge_page_mask(hstate_file(file));
}

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline unsigned long huge_page_mask_align(struct file *file)
{
	return 0;
}

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return NULL;
}

static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
				struct address_space *mapping, pgoff_t idx)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_folio(struct folio *folio,
						 struct list_head *list)
{
	return -ENOMEM;
}

static inline int replace_free_hugepage_folios(unsigned long start_pfn,
					       unsigned long end_pfn)
{
	return 0;
}

static inline void wait_for_freed_hugetlb_folios(void)
{
}

static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
						unsigned long addr,
						bool cow_from_owner)
{
	return NULL;
}

static inline struct folio *
alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
			    nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
			     nodemask_t *nmask, gfp_t gfp_mask,
			     bool allow_alloc_fallback)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *folio_hstate(struct folio *folio)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_hugetlb_folio(struct folio *folio)
{
	return 0;
}

static inline int dissolve_free_hugetlb_folios(unsigned long start_pfn,
					       unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline bool htlb_allow_alloc_fallback(int reason)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_MMU
	return ptep_get(ptep);
#else
	return *ptep;
#endif
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte, unsigned long sz)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}

static inline bool hugetlbfs_pagecache_present(
	struct hstate *h, struct vm_area_struct *vma, unsigned long address)
{
	return false;
}

static inline void hugetlb_bootmem_alloc(void)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(void);
#else
static inline __init void hugetlb_cma_reserve(void)
{
}
#endif

#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return ptdesc_pmd_is_shared(virt_to_ptdesc(pte));
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}

bool __vma_private_lock(struct vm_area_struct *vma);

/*
 * Safe version of huge_pte_offset() that checks the locks. See comments
 * above huge_pte_offset().
 */
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	/*
	 * If pmd sharing is possible, locking is needed to safely walk the
	 * hugetlb pgtables. More information can be found at the comment
	 * above huge_pte_offset() in the same file.
	 *
	 * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
	 */
	if (__vma_shareable_lock(vma))
		WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
			     !lockdep_is_held(
				 &vma->vm_file->f_mapping->i_mmap_rwsem));
#endif
	return huge_pte_offset(vma->vm_mm, addr, sz);
}

#endif /* _LINUX_HUGETLB_H */