Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm: replace pmd_to_swp_entry() with softleaf_from_pmd()

Introduce softleaf_from_pmd() to perform for PMDs the equivalent operation
that softleaf_from_pte() performs for PTEs, and cascade the change through
the code base accordingly, introducing helpers as necessary.

We are then able to eliminate pmd_to_swp_entry(),
is_pmd_migration_entry(), is_pmd_device_private_entry() and
is_pmd_non_present_folio_entry().

This further establishes the use of leaf operations throughout the code
base and lays the foundations for eliminating is_swap_pmd().

No functional change intended.
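
For instance, a call site that previously open-coded the PMD-to-swap-entry
conversion now reads as below (an illustrative sketch distilled from the
hunks in this patch; note that softleaf_from_pmd() returns an empty leaf
entry for present PMDs, so the wrapper needs no separate pmd_present()
guard):

	/* Before: convert the PMD to a swap entry by hand, then test it. */
	swp_entry_t entry = pmd_to_swp_entry(*pmd);

	if (is_migration_entry(entry))
		pmd_migration_entry_wait(mm, pmd);

	/*
	 * After: use the new PMD-level predicate, which wraps
	 * softleaf_from_pmd() + softleaf_is_migration().
	 */
	if (pmd_is_migration_entry(*pmd))
		pmd_migration_entry_wait(mm, pmd);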

[lorenzo.stoakes@oracle.com: check writable, not readable/writable, per Vlastimil]
Link: https://lkml.kernel.org/r/cd97b6ec-00f9-45a4-9ae0-8f009c212a94@lucifer.local
Link: https://lkml.kernel.org/r/3fb431699639ded8fdc63d2210aa77a38c8891f1.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Lorenzo Stoakes, committed by Andrew Morton
0ac881ef 5dfa7916

+339 -225
+13 -14
fs/proc/task_mmu.c
···
		page = vm_normal_page_pmd(vma, addr, *pmd);
		present = true;
	} else if (unlikely(thp_migration_supported())) {
-		swp_entry_t entry = pmd_to_swp_entry(*pmd);
+		const softleaf_t entry = softleaf_from_pmd(*pmd);

-		if (is_pfn_swap_entry(entry))
-			page = pfn_swap_entry_to_page(entry);
+		if (softleaf_has_pfn(entry))
+			page = softleaf_to_page(entry);
	}
	if (IS_ERR_OR_NULL(page))
		return;
···
		pmd = pmd_clear_soft_dirty(pmd);

		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
-	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
+	} else if (pmd_is_migration_entry(pmd)) {
		pmd = pmd_swp_clear_soft_dirty(pmd);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	}
···
		if (pm->show_pfn)
			frame = pmd_pfn(pmd) + idx;
	} else if (thp_migration_supported()) {
-		swp_entry_t entry = pmd_to_swp_entry(pmd);
+		const softleaf_t entry = softleaf_from_pmd(pmd);
		unsigned long offset;

		if (pm->show_pfn) {
-			if (is_pfn_swap_entry(entry))
-				offset = swp_offset_pfn(entry) + idx;
+			if (softleaf_has_pfn(entry))
+				offset = softleaf_to_pfn(entry) + idx;
			else
				offset = swp_offset(entry) + idx;
			frame = swp_type(entry) |
···
			flags |= PM_SOFT_DIRTY;
		if (pmd_swp_uffd_wp(pmd))
			flags |= PM_UFFD_WP;
-		VM_WARN_ON_ONCE(!is_pmd_migration_entry(pmd));
+		VM_WARN_ON_ONCE(!pmd_is_migration_entry(pmd));
		page = pfn_swap_entry_to_page(entry);
	}
···
		if (pmd_soft_dirty(pmd))
			categories |= PAGE_IS_SOFT_DIRTY;
	} else {
-		swp_entry_t swp;
-
		categories |= PAGE_IS_SWAPPED;
		if (!pmd_swp_uffd_wp(pmd))
			categories |= PAGE_IS_WRITTEN;
···
			categories |= PAGE_IS_SOFT_DIRTY;

		if (p->masks_of_interest & PAGE_IS_FILE) {
-			swp = pmd_to_swp_entry(pmd);
-			if (is_pfn_swap_entry(swp) &&
-			    !folio_test_anon(pfn_swap_entry_folio(swp)))
+			const softleaf_t entry = softleaf_from_pmd(pmd);
+
+			if (softleaf_has_pfn(entry) &&
+			    !folio_test_anon(softleaf_to_folio(entry)))
				categories |= PAGE_IS_FILE;
		}
	}
···
		old = pmdp_invalidate_ad(vma, addr, pmdp);
		pmd = pmd_mkuffd_wp(old);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
-	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
+	} else if (pmd_is_migration_entry(pmd)) {
		pmd = pmd_swp_mkuffd_wp(pmd);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	}
+216 -2
include/linux/leafops.h
···
}

/**
+ * softleaf_to_pte() - Obtain a PTE entry from a leaf entry.
+ * @entry: Leaf entry.
+ *
+ * This generates an architecture-specific PTE entry that can be utilised to
+ * encode the metadata the leaf entry encodes.
+ *
+ * Returns: Architecture-specific PTE entry encoding leaf entry.
+ */
+static inline pte_t softleaf_to_pte(softleaf_t entry)
+{
+	/* Temporary until swp_entry_t eliminated. */
+	return swp_entry_to_pte(entry);
+}
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+/**
+ * softleaf_from_pmd() - Obtain a leaf entry from a PMD entry.
+ * @pmd: PMD entry.
+ *
+ * If @pmd is present (therefore not a leaf entry) the function returns an empty
+ * leaf entry. Otherwise, it returns a leaf entry.
+ *
+ * Returns: Leaf entry.
+ */
+static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
+{
+	softleaf_t arch_entry;
+
+	if (pmd_present(pmd) || pmd_none(pmd))
+		return softleaf_mk_none();
+
+	if (pmd_swp_soft_dirty(pmd))
+		pmd = pmd_swp_clear_soft_dirty(pmd);
+	if (pmd_swp_uffd_wp(pmd))
+		pmd = pmd_swp_clear_uffd_wp(pmd);
+	arch_entry = __pmd_to_swp_entry(pmd);
+
+	/* Temporary until swp_entry_t eliminated. */
+	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+#else
+
+static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
+{
+	return softleaf_mk_none();
+}
+
+#endif
+
+/**
 * softleaf_is_none() - Is the leaf entry empty?
 * @entry: Leaf entry.
 *
···
}

/**
+ * softleaf_is_migration_write() - Is this leaf entry a writable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a writable migration entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_migration_write(softleaf_t entry)
+{
+	return softleaf_type(entry) == SOFTLEAF_MIGRATION_WRITE;
+}
+
+/**
+ * softleaf_is_migration_read() - Is this leaf entry a readable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a readable migration entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_migration_read(softleaf_t entry)
+{
+	return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ;
+}
+
+/**
+ * softleaf_is_migration_read_exclusive() - Is this leaf entry an exclusive
+ * readable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is an exclusive readable migration entry,
+ * otherwise false.
+ */
+static inline bool softleaf_is_migration_read_exclusive(softleaf_t entry)
+{
+	return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ_EXCLUSIVE;
+}
+
+/**
 * softleaf_is_migration() - Is this leaf entry a migration entry?
 * @entry: Leaf entry.
 *
···
	default:
		return false;
	}
+}
+
+/**
+ * softleaf_is_device_private_write() - Is this leaf entry a device private
+ * writable entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a device private writable entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_device_private_write(softleaf_t entry)
+{
+	return softleaf_type(entry) == SOFTLEAF_DEVICE_PRIVATE_WRITE;
}

/**
···
}

/**
- * softleaf_is_device_exclusive() - Is this leaf entry a device exclusive entry?
+ * softleaf_is_device_exclusive() - Is this leaf entry a device-exclusive entry?
 * @entry: Leaf entry.
 *
- * Returns: true if the leaf entry is a device exclusive entry, otherwise false.
+ * Returns: true if the leaf entry is a device-exclusive entry, otherwise false.
 */
static inline bool softleaf_is_device_exclusive(softleaf_t entry)
{
···
	return softleaf_to_marker(entry) & PTE_MARKER_UFFD_WP;
}

+#ifdef CONFIG_MIGRATION
+
+/**
+ * softleaf_is_migration_young() - Does this migration entry contain an accessed
+ * bit?
+ * @entry: Leaf entry.
+ *
+ * If the architecture can support storing A/D bits in migration entries, this
+ * determines whether the accessed (or 'young') bit was set on the migrated page
+ * table entry.
+ *
+ * Returns: true if the entry contains an accessed bit, otherwise false.
+ */
+static inline bool softleaf_is_migration_young(softleaf_t entry)
+{
+	VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
+
+	if (migration_entry_supports_ad())
+		return swp_offset(entry) & SWP_MIG_YOUNG;
+	/* Keep the old behavior of aging page after migration */
+	return false;
+}
+
+/**
+ * softleaf_is_migration_dirty() - Does this migration entry contain a dirty bit?
+ * @entry: Leaf entry.
+ *
+ * If the architecture can support storing A/D bits in migration entries, this
+ * determines whether the dirty bit was set on the migrated page table entry.
+ *
+ * Returns: true if the entry contains a dirty bit, otherwise false.
+ */
+static inline bool softleaf_is_migration_dirty(softleaf_t entry)
+{
+	VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
+
+	if (migration_entry_supports_ad())
+		return swp_offset(entry) & SWP_MIG_DIRTY;
+	/* Keep the old behavior of clean page after migration */
+	return false;
+}
+
+#else /* CONFIG_MIGRATION */
+
+static inline bool softleaf_is_migration_young(softleaf_t entry)
+{
+	return false;
+}
+
+static inline bool softleaf_is_migration_dirty(softleaf_t entry)
+{
+	return false;
+}
+#endif /* CONFIG_MIGRATION */
+
/**
 * pte_is_marker() - Does the PTE entry encode a marker leaf entry?
 * @pte: PTE entry.
···
		return true;

	return false;
+}
+
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_ENABLE_THP_MIGRATION)
+
+/**
+ * pmd_is_device_private_entry() - Check if PMD contains a device private swap
+ * entry.
+ * @pmd: The PMD to check.
+ *
+ * Returns true if the PMD contains a swap entry that represents a device private
+ * page mapping. This is used for zone device private pages that have been
+ * swapped out but still need special handling during various memory management
+ * operations.
+ *
+ * Return: true if PMD contains device private entry, false otherwise
+ */
+static inline bool pmd_is_device_private_entry(pmd_t pmd)
+{
+	return softleaf_is_device_private(softleaf_from_pmd(pmd));
+}
+
+#else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+static inline bool pmd_is_device_private_entry(pmd_t pmd)
+{
+	return false;
+}
+
+#endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+/**
+ * pmd_is_migration_entry() - Does this PMD entry encode a migration entry?
+ * @pmd: PMD entry.
+ *
+ * Returns: true if the PMD encodes a migration entry, otherwise false.
+ */
+static inline bool pmd_is_migration_entry(pmd_t pmd)
+{
+	return softleaf_is_migration(softleaf_from_pmd(pmd));
+}
+
+/**
+ * pmd_is_valid_softleaf() - Is this PMD entry a valid leaf entry?
+ * @pmd: PMD entry.
+ *
+ * PMD leaf entries are valid only if they are device private or migration
+ * entries. This function asserts that a PMD leaf entry is valid in this
+ * respect.
+ *
+ * Returns: true if the PMD entry is a valid leaf entry, otherwise false.
+ */
+static inline bool pmd_is_valid_softleaf(pmd_t pmd)
+{
+	const softleaf_t entry = softleaf_from_pmd(pmd);
+
+	/* Only device private, migration entries valid for PMD. */
+	return softleaf_is_device_private(entry) ||
+	       softleaf_is_migration(entry);
}

#endif /* CONFIG_MMU */
+1 -1
include/linux/migrate.h
···

int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src);
-void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
+void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
		__releases(ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
-100
include/linux/swapops.h
···
	return entry;
}

-static inline bool is_migration_entry_young(swp_entry_t entry)
-{
-	if (migration_entry_supports_ad())
-		return swp_offset(entry) & SWP_MIG_YOUNG;
-	/* Keep the old behavior of aging page after migration */
-	return false;
-}
-
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_DIRTY);
	return entry;
}

-static inline bool is_migration_entry_dirty(swp_entry_t entry)
-{
-	if (migration_entry_supports_ad())
-		return swp_offset(entry) & SWP_MIG_DIRTY;
-	/* Keep the old behavior of clean page after migration */
-	return false;
-}

extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
···
	return entry;
}

-static inline bool is_migration_entry_young(swp_entry_t entry)
-{
-	return false;
-}
-
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}

-static inline bool is_migration_entry_dirty(swp_entry_t entry)
-{
-	return false;
-}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_FAILURE
···

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

-static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
-{
-	swp_entry_t arch_entry;
-
-	if (pmd_swp_soft_dirty(pmd))
-		pmd = pmd_swp_clear_soft_dirty(pmd);
-	if (pmd_swp_uffd_wp(pmd))
-		pmd = pmd_swp_clear_uffd_wp(pmd);
-	arch_entry = __pmd_to_swp_entry(pmd);
-	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
-}
-
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;
···
	return __swp_entry_to_pmd(arch_entry);
}

-static inline int is_pmd_migration_entry(pmd_t pmd)
-{
-	swp_entry_t entry;
-
-	if (pmd_present(pmd))
-		return 0;
-
-	entry = pmd_to_swp_entry(pmd);
-	return is_migration_entry(entry);
-}
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
-static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
-		struct page *page)
-{
-	BUILD_BUG();
-}
-
static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
···

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

-static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
-{
-	return swp_entry(0, 0);
-}
-
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

-static inline int is_pmd_migration_entry(pmd_t pmd)
-{
-	return 0;
-}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
-
-#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_ENABLE_THP_MIGRATION)
-
-/**
- * is_pmd_device_private_entry() - Check if PMD contains a device private swap entry
- * @pmd: The PMD to check
- *
- * Returns true if the PMD contains a swap entry that represents a device private
- * page mapping. This is used for zone device private pages that have been
- * swapped out but still need special handling during various memory management
- * operations.
- *
- * Return: 1 if PMD contains device private entry, 0 otherwise
- */
-static inline int is_pmd_device_private_entry(pmd_t pmd)
-{
-	swp_entry_t entry;
-
-	if (pmd_present(pmd))
-		return 0;
-
-	entry = pmd_to_swp_entry(pmd);
-	return is_device_private_entry(entry);
-}
-
-#else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
-
-static inline int is_pmd_device_private_entry(pmd_t pmd)
-{
-	return 0;
-}
-
-#endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */

static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
-}
-
-static inline int is_pmd_non_present_folio_entry(pmd_t pmd)
-{
-	return is_pmd_migration_entry(pmd) || is_pmd_device_private_entry(pmd);
}

#endif /* CONFIG_MMU */
+3 -3
mm/damon/ops-common.c
···
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>

#include "../internal.h"
#include "ops-common.h"
···
	if (likely(pte_present(pteval)))
		pfn = pte_pfn(pteval);
	else
-		pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+		pfn = softleaf_to_pfn(softleaf_from_pte(pteval));

	folio = damon_get_folio(pfn);
	if (!folio)
···
	if (likely(pmd_present(pmdval)))
		pfn = pmd_pfn(pmdval);
	else
-		pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
+		pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));

	folio = damon_get_folio(pfn);
	if (!folio)
+3 -3
mm/filemap.c
···
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
···
 * This follows the same logic as folio_wait_bit_common() so see the comments
 * there.
 */
-void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
+void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
	__releases(ptl)
{
	struct wait_page_queue wait_page;
···
	unsigned long pflags;
	bool in_thrashing;
	wait_queue_head_t *q;
-	struct folio *folio = pfn_swap_entry_folio(entry);
+	struct folio *folio = softleaf_to_folio(entry);

	q = folio_waitqueue(folio);
	if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
+8 -8
mm/hmm.c
···
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
···
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long npages = (end - start) >> PAGE_SHIFT;
+	const softleaf_t entry = softleaf_from_pmd(pmd);
	unsigned long addr = start;
-	swp_entry_t entry = pmd_to_swp_entry(pmd);
	unsigned int required_fault;

-	if (is_device_private_entry(entry) &&
-	    pfn_swap_entry_folio(entry)->pgmap->owner ==
+	if (softleaf_is_device_private(entry) &&
+	    softleaf_to_folio(entry)->pgmap->owner ==
	    range->dev_private_owner) {
		unsigned long cpu_flags = HMM_PFN_VALID |
			hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
-		unsigned long pfn = swp_offset_pfn(entry);
+		unsigned long pfn = softleaf_to_pfn(entry);
		unsigned long i;

-		if (is_writable_device_private_entry(entry))
+		if (softleaf_is_device_private_write(entry))
			cpu_flags |= HMM_PFN_WRITE;

		/*
···
	required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
					      npages, 0);
	if (required_fault) {
-		if (is_device_private_entry(entry))
+		if (softleaf_is_device_private(entry))
			return hmm_vma_fault(addr, end, required_fault, walk);
		else
			return -EFAULT;
···
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

-	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
+	if (thp_migration_supported() && pmd_is_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
+49 -49
mm/huge_memory.c
···
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret = 0;
	spinlock_t *ptl;
-	swp_entry_t swp_entry;
+	softleaf_t entry;
	struct page *page;
	struct folio *folio;
···
		return 0;
	}

-	swp_entry = pmd_to_swp_entry(vmf->orig_pmd);
-	page = pfn_swap_entry_to_page(swp_entry);
+	entry = softleaf_from_pmd(vmf->orig_pmd);
+	page = softleaf_to_page(entry);
	folio = page_folio(page);
	vmf->page = page;
	vmf->pte = NULL;
···
		struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		pmd_t pmd, pgtable_t pgtable)
{
-	swp_entry_t entry = pmd_to_swp_entry(pmd);
+	softleaf_t entry = softleaf_from_pmd(pmd);
	struct folio *src_folio;

-	VM_WARN_ON(!is_pmd_non_present_folio_entry(pmd));
+	VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(pmd));

-	if (is_writable_migration_entry(entry) ||
-	    is_readable_exclusive_migration_entry(entry)) {
+	if (softleaf_is_migration_write(entry) ||
+	    softleaf_is_migration_read_exclusive(entry)) {
		entry = make_readable_migration_entry(swp_offset(entry));
		pmd = swp_entry_to_pmd(entry);
		if (pmd_swp_soft_dirty(*src_pmd))
···
		if (pmd_swp_uffd_wp(*src_pmd))
			pmd = pmd_swp_mkuffd_wp(pmd);
		set_pmd_at(src_mm, addr, src_pmd, pmd);
-	} else if (is_device_private_entry(entry)) {
+	} else if (softleaf_is_device_private(entry)) {
		/*
		 * For device private entries, since there are no
		 * read exclusive entries, writable = !readable
		 */
-		if (is_writable_device_private_entry(entry)) {
+		if (softleaf_is_device_private_write(entry)) {
			entry = make_readable_device_private_entry(swp_offset(entry));
			pmd = swp_entry_to_pmd(entry);
···
		set_pmd_at(src_mm, addr, src_pmd, pmd);
	}

-	src_folio = pfn_swap_entry_folio(entry);
+	src_folio = softleaf_to_folio(entry);
	VM_WARN_ON(!folio_test_large(src_folio));

	folio_get(src_folio);
···

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
-			  !is_pmd_migration_entry(orig_pmd));
+			  !pmd_is_migration_entry(orig_pmd));
		goto out;
	}
···
		folio_remove_rmap_pmd(folio, page, vma);
		WARN_ON_ONCE(folio_mapcount(folio) < 0);
		VM_BUG_ON_PAGE(!PageHead(page), page);
-	} else if (is_pmd_non_present_folio_entry(orig_pmd)) {
-		swp_entry_t entry;
+	} else if (pmd_is_valid_softleaf(orig_pmd)) {
+		const softleaf_t entry = softleaf_from_pmd(orig_pmd);

-		entry = pmd_to_swp_entry(orig_pmd);
-		folio = pfn_swap_entry_folio(entry);
+		folio = softleaf_to_folio(entry);
		flush_needed = 0;

		if (!thp_migration_supported())
···
static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
-	if (unlikely(is_pmd_migration_entry(pmd)))
+	if (unlikely(pmd_is_migration_entry(pmd)))
		pmd = pmd_swp_mksoft_dirty(pmd);
	else if (pmd_present(pmd))
		pmd = pmd_mksoft_dirty(pmd);
···
		unsigned long addr, pmd_t *pmd, bool uffd_wp,
		bool uffd_wp_resolve)
{
-	swp_entry_t entry = pmd_to_swp_entry(*pmd);
-	struct folio *folio = pfn_swap_entry_folio(entry);
+	softleaf_t entry = softleaf_from_pmd(*pmd);
+	const struct folio *folio = softleaf_to_folio(entry);
	pmd_t newpmd;

-	VM_WARN_ON(!is_pmd_non_present_folio_entry(*pmd));
-	if (is_writable_migration_entry(entry)) {
+	VM_WARN_ON(!pmd_is_valid_softleaf(*pmd));
+	if (softleaf_is_migration_write(entry)) {
		/*
		 * A protection check is difficult so
		 * just be safe and disable write
···
		newpmd = swp_entry_to_pmd(entry);
		if (pmd_swp_soft_dirty(*pmd))
			newpmd = pmd_swp_mksoft_dirty(newpmd);
-	} else if (is_writable_device_private_entry(entry)) {
+	} else if (softleaf_is_device_private_write(entry)) {
		entry = make_readable_device_private_entry(swp_offset(entry));
		newpmd = swp_entry_to_pmd(entry);
	} else {
···

	if (!pmd_trans_huge(src_pmdval)) {
		spin_unlock(src_ptl);
-		if (is_pmd_migration_entry(src_pmdval)) {
+		if (pmd_is_migration_entry(src_pmdval)) {
			pmd_migration_entry_wait(mm, &src_pmdval);
			return -EAGAIN;
		}
···
	unsigned long addr;
	pte_t *pte;
	int i;
-	swp_entry_t entry;

	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);

-	VM_WARN_ON(!is_pmd_non_present_folio_entry(*pmd) && !pmd_trans_huge(*pmd));
+	VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(*pmd) && !pmd_trans_huge(*pmd));

	count_vm_event(THP_SPLIT_PMD);
···
		zap_deposited_table(mm, pmd);
		if (!vma_is_dax(vma) && vma_is_special_huge(vma))
			return;
-		if (unlikely(is_pmd_migration_entry(old_pmd))) {
-			swp_entry_t entry;
+		if (unlikely(pmd_is_migration_entry(old_pmd))) {
+			const softleaf_t old_entry = softleaf_from_pmd(old_pmd);

-			entry = pmd_to_swp_entry(old_pmd);
-			folio = pfn_swap_entry_folio(entry);
+			folio = softleaf_to_folio(old_entry);
		} else if (is_huge_zero_pmd(old_pmd)) {
			return;
		} else {
···
		return __split_huge_zero_page_pmd(vma, haddr, pmd);
	}

+	if (pmd_is_migration_entry(*pmd)) {
+		softleaf_t entry;

-	if (is_pmd_migration_entry(*pmd)) {
		old_pmd = *pmd;
-		entry = pmd_to_swp_entry(old_pmd);
-		page = pfn_swap_entry_to_page(entry);
+		entry = softleaf_from_pmd(old_pmd);
+		page = softleaf_to_page(entry);
		folio = page_folio(page);

		soft_dirty = pmd_swp_soft_dirty(old_pmd);
		uffd_wp = pmd_swp_uffd_wp(old_pmd);

-		write = is_writable_migration_entry(entry);
+		write = softleaf_is_migration_write(entry);
		if (PageAnon(page))
-			anon_exclusive = is_readable_exclusive_migration_entry(entry);
-		young = is_migration_entry_young(entry);
-		dirty = is_migration_entry_dirty(entry);
-	} else if (is_pmd_device_private_entry(*pmd)) {
+			anon_exclusive = softleaf_is_migration_read_exclusive(entry);
+		young = softleaf_is_migration_young(entry);
+		dirty = softleaf_is_migration_dirty(entry);
+	} else if (pmd_is_device_private_entry(*pmd)) {
+		softleaf_t entry;
+
		old_pmd = *pmd;
-		entry = pmd_to_swp_entry(old_pmd);
-		page = pfn_swap_entry_to_page(entry);
+		entry = softleaf_from_pmd(old_pmd);
+		page = softleaf_to_page(entry);
		folio = page_folio(page);

		soft_dirty = pmd_swp_soft_dirty(old_pmd);
		uffd_wp = pmd_swp_uffd_wp(old_pmd);

-		write = is_writable_device_private_entry(entry);
+		write = softleaf_is_device_private_write(entry);
		anon_exclusive = PageAnonExclusive(page);

		/*
···
	 * Note that NUMA hinting access restrictions are not transferred to
	 * avoid any possibility of altering permissions across VMAs.
	 */
-	if (freeze || is_pmd_migration_entry(old_pmd)) {
+	if (freeze || pmd_is_migration_entry(old_pmd)) {
		pte_t entry;
		swp_entry_t swp_entry;
···
			VM_WARN_ON(!pte_none(ptep_get(pte + i)));
			set_pte_at(mm, addr, pte + i, entry);
		}
-	} else if (is_pmd_device_private_entry(old_pmd)) {
+	} else if (pmd_is_device_private_entry(old_pmd)) {
		pte_t entry;
		swp_entry_t swp_entry;
···
	}
	pte_unmap(pte);

-	if (!is_pmd_migration_entry(*pmd))
+	if (!pmd_is_migration_entry(*pmd))
		folio_remove_rmap_pmd(folio, page, vma);
	if (freeze)
		put_page(page);
···
		pmd_t *pmd, bool freeze)
{
	VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
-	if (pmd_trans_huge(*pmd) || is_pmd_non_present_folio_entry(*pmd))
+	if (pmd_trans_huge(*pmd) || pmd_is_valid_softleaf(*pmd))
		__split_huge_pmd_locked(vma, pmd, address, freeze);
}
···
	unsigned long address = pvmw->address;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pmd_t pmde;
-	swp_entry_t entry;
+	softleaf_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

-	entry = pmd_to_swp_entry(*pvmw->pmd);
+	entry = softleaf_from_pmd(*pvmw->pmd);
	folio_get(folio);
	pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));

	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
-	if (is_writable_migration_entry(entry))
+	if (softleaf_is_migration_write(entry))
		pmde = pmd_mkwrite(pmde, vma);
	if (pmd_swp_uffd_wp(*pvmw->pmd))
		pmde = pmd_mkuffd_wp(pmde);
-	if (!is_migration_entry_young(entry))
+	if (!softleaf_is_migration_young(entry))
		pmde = pmd_mkold(pmde);
	/* NOTE: this may contain setting soft-dirty on some archs */
-	if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
+	if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
		pmde = pmd_mkdirty(pmde);

	if (folio_is_device_private(folio)) {
···
	if (folio_test_anon(folio)) {
		rmap_t rmap_flags = RMAP_NONE;

-		if (!is_readable_migration_entry(entry))
+		if (!softleaf_is_migration_read(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
+2 -2
mm/khugepaged.c
···
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate_wait.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/shmem_fs.h>
#include <linux/dax.h>
#include <linux/ksm.h>
···
	 * collapse it. Migration success or failure will eventually end
	 * up with a present PMD mapping a folio again.
	 */
-	if (is_pmd_migration_entry(pmde))
+	if (pmd_is_migration_entry(pmde))
		return SCAN_PMD_MAPPED;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
+1 -1
mm/madvise.c
···

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
-			  !is_pmd_migration_entry(orig_pmd));
+			  !pmd_is_migration_entry(orig_pmd));
		goto huge_unlock;
	}

+2 -2
mm/memory.c
···
		goto fallback;

	if (unlikely(!pmd_present(vmf.orig_pmd))) {
-		if (is_pmd_device_private_entry(vmf.orig_pmd))
+		if (pmd_is_device_private_entry(vmf.orig_pmd))
			return do_huge_pmd_device_private(&vmf);

-		if (is_pmd_migration_entry(vmf.orig_pmd))
+		if (pmd_is_migration_entry(vmf.orig_pmd))
			pmd_migration_entry_wait(mm, vmf.pmd);
		return 0;
	}
+2 -2
mm/mempolicy.c
···
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/gcd.h>

#include <asm/tlbflush.h>
···
	struct folio *folio;
	struct queue_pages *qp = walk->private;

-	if (unlikely(is_pmd_migration_entry(*pmd))) {
+	if (unlikely(pmd_is_migration_entry(*pmd))) {
		qp->nr_failed++;
		return;
	}
+10 -10
mm/migrate.c
···
#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
···
	rmap_t rmap_flags = RMAP_NONE;
	pte_t old_pte;
	pte_t pte;
-	swp_entry_t entry;
+	softleaf_t entry;
	struct page *new;
	unsigned long idx = 0;
···
	folio_get(folio);
	pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));

-	entry = pte_to_swp_entry(old_pte);
-	if (!is_migration_entry_young(entry))
+	entry = softleaf_from_pte(old_pte);
+	if (!softleaf_is_migration_young(entry))
		pte = pte_mkold(pte);
-	if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
+	if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
		pte = pte_mkdirty(pte);
	if (pte_swp_soft_dirty(old_pte))
		pte = pte_mksoft_dirty(pte);
	else
		pte = pte_clear_soft_dirty(pte);

-	if (is_writable_migration_entry(entry))
+	if (softleaf_is_migration_write(entry))
		pte = pte_mkwrite(pte, vma);
	else if (pte_swp_uffd_wp(old_pte))
		pte = pte_mkuffd_wp(pte);

-	if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
+	if (folio_test_anon(folio) && !softleaf_is_migration_read(entry))
		rmap_flags |= RMAP_EXCLUSIVE;

	if (unlikely(is_device_private_page(new))) {
···
		else
			entry = make_readable_device_private_entry(
						page_to_pfn(new));
-		pte = swp_entry_to_pte(entry);
+		pte = softleaf_to_pte(entry);
		if (pte_swp_soft_dirty(old_pte))
			pte = pte_swp_mksoft_dirty(pte);
		if (pte_swp_uffd_wp(old_pte))
···
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
-	if (!is_pmd_migration_entry(*pmd))
+	if (!pmd_is_migration_entry(*pmd))
		goto unlock;
-	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
+	migration_entry_wait_on_locked(softleaf_from_pmd(*pmd), ptl);
	return;
unlock:
	spin_unlock(ptl);
+7 -7
mm/migrate_device.c
···
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"
···
	struct folio *folio;
	struct migrate_vma *migrate = walk->private;
	spinlock_t *ptl;
-	swp_entry_t entry;
	int ret;
	unsigned long write = 0;
···
		if (pmd_write(*pmdp))
			write = MIGRATE_PFN_WRITE;
	} else if (!pmd_present(*pmdp)) {
-		entry = pmd_to_swp_entry(*pmdp);
-		folio = pfn_swap_entry_folio(entry);
+		const softleaf_t entry = softleaf_from_pmd(*pmdp);
+
+		folio = softleaf_to_folio(entry);

-		if (!is_device_private_entry(entry) ||
+		if (!softleaf_is_device_private(entry) ||
		    !(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
		    (folio->pgmap->owner != migrate->pgmap_owner)) {
			spin_unlock(ptl);
			return migrate_vma_collect_skip(start, end, walk);
		}

-		if (is_migration_entry(entry)) {
+		if (softleaf_is_migration(entry)) {
			migration_entry_wait_on_locked(entry, ptl);
			spin_unlock(ptl);
			return -EAGAIN;
		}

-		if (is_writable_device_private_entry(entry))
+		if (softleaf_is_device_private_write(entry))
			write = MIGRATE_PFN_WRITE;
	} else {
		spin_unlock(ptl);
+8 -8
mm/page_table_check.c
···
#include <linux/mm.h>
#include <linux/page_table_check.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt
···
EXPORT_SYMBOL(__page_table_check_pud_clear);

/* Whether the swap entry cached writable information */
-static inline bool swap_cached_writable(swp_entry_t entry)
+static inline bool softleaf_cached_writable(softleaf_t entry)
{
-	return is_writable_device_private_entry(entry) ||
-	       is_writable_migration_entry(entry);
+	return softleaf_is_device_private_write(entry) ||
+	       softleaf_is_migration_write(entry);
}

static void page_table_check_pte_flags(pte_t pte)
···
	if (pte_present(pte)) {
		WARN_ON_ONCE(pte_uffd_wp(pte) && pte_write(pte));
	} else if (pte_swp_uffd_wp(pte)) {
-		const swp_entry_t entry = pte_to_swp_entry(pte);
+		const softleaf_t entry = softleaf_from_pte(pte);

-		WARN_ON_ONCE(swap_cached_writable(entry));
+		WARN_ON_ONCE(softleaf_cached_writable(entry));
	}
}
···
		if (pmd_uffd_wp(pmd))
			WARN_ON_ONCE(pmd_write(pmd));
	} else if (pmd_swp_uffd_wp(pmd)) {
-		swp_entry_t entry = pmd_to_swp_entry(pmd);
+		const softleaf_t entry = softleaf_from_pmd(pmd);

-		WARN_ON_ONCE(swap_cached_writable(entry));
+		WARN_ON_ONCE(softleaf_cached_writable(entry));
	}
}
+8 -7
mm/page_vma_mapped.c
···
	 */
	pmde = pmdp_get_lockless(pvmw->pmd);

-	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+	if (pmd_trans_huge(pmde) || pmd_is_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		pmde = *pvmw->pmd;
		if (!pmd_present(pmde)) {
-			swp_entry_t entry;
+			softleaf_t entry;

			if (!thp_migration_supported() ||
			    !(pvmw->flags & PVMW_MIGRATION))
				return not_found(pvmw);
-			entry = pmd_to_swp_entry(pmde);
-			if (!is_migration_entry(entry) ||
-			    !check_pmd(swp_offset_pfn(entry), pvmw))
+			entry = softleaf_from_pmd(pmde);
+
+			if (!softleaf_is_migration(entry) ||
+			    !check_pmd(softleaf_to_pfn(entry), pvmw))
				return not_found(pvmw);
			return true;
		}
···
		 * cannot return prematurely, while zap_huge_pmd() has
		 * cleared *pmd but not decremented compound_mapcount().
		 */
-		swp_entry_t entry = pmd_to_swp_entry(pmde);
+		const softleaf_t entry = softleaf_from_pmd(pmde);

-		if (is_device_private_entry(entry)) {
+		if (softleaf_is_device_private(entry)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			return true;
		}
+4 -4
mm/pagewalk.c
···
#include <linux/hugetlb.h>
#include <linux/mmu_context.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>

#include <asm/tlbflush.h>

···
			goto found;
		}
	} else if ((flags & FW_MIGRATION) &&
-		   is_pmd_migration_entry(pmd)) {
-		swp_entry_t entry = pmd_to_swp_entry(pmd);
+		   pmd_is_migration_entry(pmd)) {
+		const softleaf_t entry = softleaf_from_pmd(pmd);

-		page = pfn_swap_entry_to_page(entry);
+		page = softleaf_to_page(entry);
		expose_page = false;
		goto found;
	}
+2 -2
mm/rmap.c
···
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
···
	if (likely(pmd_present(pmdval)))
		pfn = pmd_pfn(pmdval);
	else
-		pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
+		pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));

	subpage = folio_page(folio, pfn - folio_pfn(folio));
