Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm: remove callers of pfn_t functionality

All PFN_* pfn_t flags have been removed. Therefore there is no longer a
need for the pfn_t type and all uses can be replaced with normal pfns.

Link: https://lkml.kernel.org/r/bbedfa576c9822f8032494efbe43544628698b1f.1750323463.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Björn Töpel <bjorn@kernel.org>
Cc: Björn Töpel <bjorn@rivosinc.com>
Cc: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Inki Dae <m.szyprowski@samsung.com>
Cc: John Groves <john@groves.net>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
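
The conversion at each call site is mechanical: a pfn_t local becomes a plain unsigned long, and each pfn_t_* helper collapses into its ordinary pfn counterpart. As a hedged sketch of the pattern repeated throughout the diffs below (phys stands in for whatever physical address a given driver computes; the "0" flags argument was already dead after the earlier flag removal):

        /* Before: wrap the physical address in a pfn_t, then unwrap it */
        pfn_t pfn = phys_to_pfn_t(phys, 0);
        struct page *page = pfn_t_to_page(pfn);

        /* After: a bare pfn and the standard helpers */
        unsigned long pfn = PHYS_PFN(phys);     /* phys >> PAGE_SHIFT */
        struct page *page = pfn_to_page(pfn);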

Authored by Alistair Popple and committed by Andrew Morton (21aa65bf 984921ed)

43 files changed, 109 insertions(+), 235 deletions(-)

arch/x86/mm/pat/memtype.c (-1)
···
 #include <linux/debugfs.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
-#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/mm.h>

drivers/dax/device.c (+11 -12)
···
 #include <linux/pagemap.h>
 #include <linux/module.h>
 #include <linux/device.h>
-#include <linux/pfn_t.h>
 #include <linux/cdev.h>
 #include <linux/slab.h>
 #include <linux/dax.h>
···
        return -1;
 }

-static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
                unsigned long fault_size)
 {
        unsigned long i, nr_pages = fault_size / PAGE_SIZE;
···
                        ALIGN_DOWN(vmf->address, fault_size));

        for (i = 0; i < nr_pages; i++) {
-               struct folio *folio = pfn_folio(pfn_t_to_pfn(pfn) + i);
+               struct folio *folio = pfn_folio(pfn + i);

                if (folio->mapping)
                        continue;
···
 {
        struct device *dev = &dev_dax->dev;
        phys_addr_t phys;
-       pfn_t pfn;
+       unsigned long pfn;
        unsigned int fault_size = PAGE_SIZE;

        if (check_vma(dev_dax, vmf->vma, __func__))
···
                return VM_FAULT_SIGBUS;
        }

-       pfn = phys_to_pfn_t(phys, 0);
+       pfn = PHYS_PFN(phys);

        dax_set_mapping(vmf, pfn, fault_size);

-       return vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn),
+       return vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn),
                        vmf->flags & FAULT_FLAG_WRITE);
 }
···
        struct device *dev = &dev_dax->dev;
        phys_addr_t phys;
        pgoff_t pgoff;
-       pfn_t pfn;
+       unsigned long pfn;
        unsigned int fault_size = PMD_SIZE;

        if (check_vma(dev_dax, vmf->vma, __func__))
···
                return VM_FAULT_SIGBUS;
        }

-       pfn = phys_to_pfn_t(phys, 0);
+       pfn = PHYS_PFN(phys);

        dax_set_mapping(vmf, pfn, fault_size);

-       return vmf_insert_folio_pmd(vmf, page_folio(pfn_t_to_page(pfn)),
+       return vmf_insert_folio_pmd(vmf, page_folio(pfn_to_page(pfn)),
                        vmf->flags & FAULT_FLAG_WRITE);
 }
···
        struct device *dev = &dev_dax->dev;
        phys_addr_t phys;
        pgoff_t pgoff;
-       pfn_t pfn;
+       unsigned long pfn;
        unsigned int fault_size = PUD_SIZE;
···
                return VM_FAULT_SIGBUS;
        }

-       pfn = phys_to_pfn_t(phys, 0);
+       pfn = PHYS_PFN(phys);

        dax_set_mapping(vmf, pfn, fault_size);

-       return vmf_insert_folio_pud(vmf, page_folio(pfn_t_to_page(pfn)),
+       return vmf_insert_folio_pud(vmf, page_folio(pfn_to_page(pfn)),
                        vmf->flags & FAULT_FLAG_WRITE);
 }
 #else

drivers/dax/hmem/hmem.c (-1)
···
 #include <linux/platform_device.h>
 #include <linux/memregion.h>
 #include <linux/module.h>
-#include <linux/pfn_t.h>
 #include <linux/dax.h>
 #include "../bus.h"

drivers/dax/kmem.c (-1)
···
 #include <linux/memory.h>
 #include <linux/module.h>
 #include <linux/device.h>
-#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/dax.h>
 #include <linux/fs.h>

drivers/dax/pmem.c (-1)
···
 /* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
 #include <linux/memremap.h>
 #include <linux/module.h>
-#include <linux/pfn_t.h>
 #include "../nvdimm/pfn.h"
 #include "../nvdimm/nd.h"
 #include "bus.h"

drivers/dax/super.c (+1 -2)
···
 #include <linux/mount.h>
 #include <linux/pseudo_fs.h>
 #include <linux/magic.h>
-#include <linux/pfn_t.h>
 #include <linux/cdev.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
···
  * pages accessible at the device relative @pgoff.
  */
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
-               enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
+               enum dax_access_mode mode, void **kaddr, unsigned long *pfn)
 {
        long avail;
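
With dax_direct_access() now reporting the page frame through an unsigned long pointer, a caller under the new signature looks roughly like the sketch below (a minimal illustration, not taken verbatim from the patch; dax_dev, pgoff and nr_pages are assumed to come from the surrounding code):

        void *kaddr;
        unsigned long pfn;
        long avail;

        /* On success, avail is the number of contiguous pages usable
         * at *kaddr, whose first page frame number is pfn. */
        avail = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
                                  &kaddr, &pfn);
        if (avail < 0)
                return avail;   /* negative errno from the driver */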

drivers/gpu/drm/exynos/exynos_drm_gem.c (-1)
···
 #include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
 #include <linux/shmem_fs.h>
 #include <linux/module.h>

drivers/gpu/drm/gma500/fbdev.c (+1 -2)
···
  **************************************************************************/

 #include <linux/fb.h>
-#include <linux/pfn_t.h>

 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
···
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        for (i = 0; i < page_num; ++i) {
-               err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, 0));
+               err = vmf_insert_mixed(vma, address, pfn);
                if (unlikely(err & VM_FAULT_ERROR))
                        break;
                address += PAGE_SIZE;

drivers/gpu/drm/i915/gem/i915_gem_mman.c (-1)
···
 #include <linux/anon_inodes.h>
 #include <linux/mman.h>
-#include <linux/pfn_t.h>
 #include <linux/sizes.h>

 #include <drm/drm_cache.h>

drivers/gpu/drm/msm/msm_gem.c (-1)
···
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
-#include <linux/pfn_t.h>

 #include <drm/drm_prime.h>
 #include <drm/drm_file.h>

drivers/gpu/drm/omapdrm/omap_gem.c (+2 -4)
···
 #include <linux/seq_file.h>
 #include <linux/shmem_fs.h>
 #include <linux/spinlock.h>
-#include <linux/pfn_t.h>
 #include <linux/vmalloc.h>

 #include <drm/drm_prime.h>
···
        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             pfn, pfn << PAGE_SHIFT);

-       return vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, 0));
+       return vmf_insert_mixed(vma, vmf->address, pfn);
 }

 /* Special handling for the case of faulting in 2d tiled buffers */
···
             pfn, pfn << PAGE_SHIFT);

        for (i = n; i > 0; i--) {
-               ret = vmf_insert_mixed(vma,
-                               vaddr, __pfn_to_pfn_t(pfn, 0));
+               ret = vmf_insert_mixed(vma, vaddr, pfn);
                if (ret & VM_FAULT_ERROR)
                        break;
                pfn += priv->usergart[fmt].stride_pfn;

drivers/gpu/drm/v3d/v3d_bo.c (-1)
···
  */

 #include <linux/dma-buf.h>
-#include <linux/pfn_t.h>
 #include <linux/vmalloc.h>

 #include "v3d_drv.h"

drivers/hwtracing/intel_th/msu.c (+1 -2)
···
 #include <linux/io.h>
 #include <linux/workqueue.h>
 #include <linux/dma-mapping.h>
-#include <linux/pfn_t.h>

 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
···
                return VM_FAULT_SIGBUS;

        get_page(page);
-       return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page));
+       return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn(page));
 }

 static const struct vm_operations_struct msc_mmap_ops = {

drivers/md/dm-linear.c (+1 -1)
···
 static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);

drivers/md/dm-log-writes.c (+1 -1)
···
 static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);

drivers/md/dm-stripe.c (+1 -1)
···
 static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);

drivers/md/dm-target.c (+1 -1)
···
 static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        return -EIO;
 }

drivers/md/dm-writecache.c (+5 -6)
···
 #include <linux/dm-io.h>
 #include <linux/dm-kcopyd.h>
 #include <linux/dax.h>
-#include <linux/pfn_t.h>
 #include <linux/libnvdimm.h>
 #include <linux/delay.h>
 #include "dm-io-tracker.h"
···
        int r;
        loff_t s;
        long p, da;
-       pfn_t pfn;
+       unsigned long pfn;
        int id;
        struct page **pages;
        sector_t offset;
···
                r = da;
                goto err2;
        }
-       if (!pfn_t_has_page(pfn)) {
+       if (!pfn_valid(pfn)) {
                wc->memory_map = NULL;
                r = -EOPNOTSUPP;
                goto err2;
···
                                r = daa ? daa : -EINVAL;
                                goto err3;
                        }
-                       if (!pfn_t_has_page(pfn)) {
+                       if (!pfn_valid(pfn)) {
                                r = -EOPNOTSUPP;
                                goto err3;
                        }
                        while (daa-- && i < p) {
-                               pages[i++] = pfn_t_to_page(pfn);
-                               pfn.val++;
+                               pages[i++] = pfn_to_page(pfn);
+                               pfn++;
                                if (!(i & 15))
                                        cond_resched();
                        }

drivers/md/dm.c (+1 -1)
···
 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * PAGE_SECTORS;

drivers/nvdimm/pmem.c (+3 -5)
···
 #include <linux/kstrtox.h>
 #include <linux/vmalloc.h>
 #include <linux/blk-mq.h>
-#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
 #include <linux/dax.h>
···
 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
 __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
        sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT;
···
        if (kaddr)
                *kaddr = pmem->virt_addr + offset;
        if (pfn)
-               *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+               *pfn = PHYS_PFN(pmem->phys_addr + offset);

        if (bb->count &&
            badblocks_check(bb, sector, num, &first_bad, &num_bad)) {
···
 static long pmem_dax_direct_access(struct dax_device *dax_dev,
                pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
-               void **kaddr, pfn_t *pfn)
+               void **kaddr, unsigned long *pfn)
 {
        struct pmem_device *pmem = dax_get_private(dax_dev);
···
        pmem->disk = disk;
        pmem->pgmap.owner = pmem;
-       pmem->pfn_flags = 0;
        if (is_nd_pfn(dev)) {
                pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
                pmem->pgmap.ops = &fsdax_pagemap_ops;

drivers/nvdimm/pmem.h (+1 -3)
···
 #include <linux/badblocks.h>
 #include <linux/memremap.h>
 #include <linux/types.h>
-#include <linux/pfn_t.h>
 #include <linux/fs.h>

 enum dax_access_mode;
···
        phys_addr_t phys_addr;
        /* when non-zero this device is hosting a 'pfn' instance */
        phys_addr_t data_offset;
-       u64 pfn_flags;
        void *virt_addr;
        /* immutable base size of the namespace */
        size_t size;
···
 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn);
+               unsigned long *pfn);

 #ifdef CONFIG_MEMORY_FAILURE
 static inline bool test_and_clear_pmem_poison(struct page *page)

drivers/s390/block/dcssblk.c (+4 -5)
···
 #include <linux/blkdev.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
-#include <linux/pfn_t.h>
 #include <linux/uio.h>
 #include <linux/dax.h>
 #include <linux/io.h>
···
 static void dcssblk_submit_bio(struct bio *bio);
 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn);
+               unsigned long *pfn);

 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
···
 static long
 __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
-               long nr_pages, void **kaddr, pfn_t *pfn)
+               long nr_pages, void **kaddr, unsigned long *pfn)
 {
        resource_size_t offset = pgoff * PAGE_SIZE;
        unsigned long dev_sz;
···
        if (kaddr)
                *kaddr = __va(dev_info->start + offset);
        if (pfn)
-               *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 0);
+               *pfn = PFN_DOWN(dev_info->start + offset);

        return (dev_sz - offset) / PAGE_SIZE;
 }
···
 static long
 dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);

drivers/vfio/pci/vfio_pci_core.c (+2 -3)
···
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/pci.h>
-#include <linux/pfn_t.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/types.h>
···
                break;
 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
        case PMD_ORDER:
-               ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn, 0), false);
+               ret = vmf_insert_pfn_pmd(vmf, pfn, false);
                break;
 #endif
 #ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
        case PUD_ORDER:
-               ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn, 0), false);
+               ret = vmf_insert_pfn_pud(vmf, pfn, false);
                break;
 #endif
        default:

fs/cramfs/inode.c (+2 -3)
···
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/pagemap.h>
-#include <linux/pfn_t.h>
 #include <linux/ramfs.h>
 #include <linux/init.h>
 #include <linux/string.h>
···
                for (i = 0; i < pages && !ret; i++) {
                        vm_fault_t vmf;
                        unsigned long off = i * PAGE_SIZE;
-                       pfn_t pfn = phys_to_pfn_t(address + off, 0);
-                       vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
+                       vmf = vmf_insert_mixed(vma, vma->vm_start + off,
+                                              PHYS_PFN(address + off));
                        if (vmf & VM_FAULT_ERROR)
                                ret = vm_fault_to_errno(vmf, 0);
                }

fs/dax.c (+25 -25)
···
 #include <linux/sched/signal.h>
 #include <linux/uio.h>
 #include <linux/vmstat.h>
-#include <linux/pfn_t.h>
 #include <linux/sizes.h>
 #include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
···
        return page_folio(pfn_to_page(dax_to_pfn(entry)));
 }

-static void *dax_make_entry(pfn_t pfn, unsigned long flags)
+static void *dax_make_entry(unsigned long pfn, unsigned long flags)
 {
-       return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
+       return xa_mk_value(flags | (pfn << DAX_SHIFT));
 }

 static bool dax_is_locked(void *entry)
···
        if (order > 0)
                flags |= DAX_PMD;
-       entry = dax_make_entry(pfn_to_pfn_t(0), flags);
+       entry = dax_make_entry(0, flags);
        dax_lock_entry(xas, entry);
        if (xas_error(xas))
                goto out_unlock;
···
  * appropriate.
  */
 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
-               const struct iomap_iter *iter, void *entry, pfn_t pfn,
+               const struct iomap_iter *iter, void *entry, unsigned long pfn,
                unsigned long flags)
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
···
 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

 static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
-               size_t size, void **kaddr, pfn_t *pfnp)
+               size_t size, void **kaddr, unsigned long *pfnp)
 {
        pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
        int id, rc = 0;
···
        rc = -EINVAL;
        if (PFN_PHYS(length) < size)
                goto out;
-       if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
+       if (*pfnp & (PHYS_PFN(size)-1))
                goto out;

        rc = 0;
···
 {
        struct inode *inode = iter->inode;
        unsigned long vaddr = vmf->address;
-       pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
+       unsigned long pfn = my_zero_pfn(vaddr);
        vm_fault_t ret;

        *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);

-       ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), false);
+       ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), false);
        trace_dax_load_hole(inode, vmf, ret);
        return ret;
 }
···
        struct folio *zero_folio;
        spinlock_t *ptl;
        pmd_t pmd_entry;
-       pfn_t pfn;
+       unsigned long pfn;

        zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);

        if (unlikely(!zero_folio))
                goto fallback;

-       pfn = page_to_pfn_t(&zero_folio->page);
+       pfn = page_to_pfn(&zero_folio->page);
        *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
                                  DAX_PMD | DAX_ZERO_PAGE);
···
  * insertion for now and return the pfn so that caller can insert it after the
  * fsync is done.
  */
-static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
+static vm_fault_t dax_fault_synchronous_pfnp(unsigned long *pfnp,
+               unsigned long pfn)
 {
        if (WARN_ON_ONCE(!pfnp))
                return VM_FAULT_SIGBUS;
···
  * @pmd: distinguish whether it is a pmd fault
  */
 static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
-               const struct iomap_iter *iter, pfn_t *pfnp,
+               const struct iomap_iter *iter, unsigned long *pfnp,
                struct xa_state *xas, void **entry, bool pmd)
 {
        const struct iomap *iomap = &iter->iomap;
···
        unsigned long entry_flags = pmd ? DAX_PMD : 0;
        struct folio *folio;
        int ret, err = 0;
-       pfn_t pfn;
+       unsigned long pfn;
        void *kaddr;

        if (!pmd && vmf->cow_page)
···
        folio_ref_inc(folio);
        if (pmd)
-               ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn_t_to_pfn(pfn)),
-                                       write);
+               ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn), write);
        else
-               ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), write);
+               ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), write);
        folio_put(folio);

        return ret;
 }

-static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, unsigned long *pfnp,
                int *iomap_errp, const struct iomap_ops *ops)
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
···
        return false;
 }

-static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
                const struct iomap_ops *ops)
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
···
        return ret;
 }
 #else
-static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
                const struct iomap_ops *ops)
 {
        return VM_FAULT_FALLBACK;
···
  * successfully.
  */
 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
-               pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
+               unsigned long *pfnp, int *iomap_errp,
+               const struct iomap_ops *ops)
 {
        if (order == 0)
                return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
···
  * This function inserts a writeable PTE or PMD entry into the page tables
  * for an mmaped DAX file. It also marks the page cache entry as dirty.
  */
-static vm_fault_t
-dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
+static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
+               unsigned long pfn, unsigned int order)
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
···
        xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
        dax_lock_entry(&xas, entry);
        xas_unlock_irq(&xas);
-       folio = pfn_folio(pfn_t_to_pfn(pfn));
+       folio = pfn_folio(pfn);
        folio_ref_inc(folio);
        if (order == 0)
                ret = vmf_insert_page_mkwrite(vmf, &folio->page, true);
···
  * table entry.
  */
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
-               pfn_t pfn)
+               unsigned long pfn)
 {
        int err;
        loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;

fs/ext4/file.c (+1 -1)
···
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
-       pfn_t pfn;
+       unsigned long pfn;

        if (write) {
                sb_start_pagefault(sb);

fs/fuse/dax.c (+1 -2)
···
 #include <linux/dax.h>
 #include <linux/uio.h>
 #include <linux/pagemap.h>
-#include <linux/pfn_t.h>
 #include <linux/iomap.h>
 #include <linux/interval_tree.h>
···
        vm_fault_t ret;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;
-       pfn_t pfn;
+       unsigned long pfn;
        int error = 0;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_conn_dax *fcd = fc->dax;

fs/fuse/virtio_fs.c (+2 -3)
···
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/group_cpus.h>
-#include <linux/pfn_t.h>
 #include <linux/memremap.h>
 #include <linux/module.h>
 #include <linux/virtio.h>
···
  */
 static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                                    long nr_pages, enum dax_access_mode mode,
-                                   void **kaddr, pfn_t *pfn)
+                                   void **kaddr, unsigned long *pfn)
 {
        struct virtio_fs *fs = dax_get_private(dax_dev);
        phys_addr_t offset = PFN_PHYS(pgoff);
···
        if (kaddr)
                *kaddr = fs->window_kaddr + offset;
        if (pfn)
-               *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, 0);
+               *pfn = PHYS_PFN(fs->window_phys_addr + offset);
        return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
 }

fs/xfs/xfs_file.c (+1 -1)
···
        bool            write_fault)
 {
        vm_fault_t      ret;
-       pfn_t           pfn;
+       unsigned long   pfn;

        if (!IS_ENABLED(CONFIG_FS_DAX)) {
                ASSERT(0);

include/linux/dax.h (+5 -4)
···
         * number of pages available for DAX at that pfn.
         */
        long (*direct_access)(struct dax_device *, pgoff_t, long,
-                       enum dax_access_mode, void **, pfn_t *);
+                       enum dax_access_mode, void **, unsigned long *);
        /* zero_page_range: required operation. Zero page range */
        int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
        /*
···
 bool dax_alive(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
-               enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
+               enum dax_access_mode mode, void **kaddr, unsigned long *pfn);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i);
 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
···
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops);
 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
-               pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
+               unsigned long *pfnp, int *errp,
+               const struct iomap_ops *ops);
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
-               unsigned int order, pfn_t pfn);
+               unsigned int order, unsigned long pfn);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 void dax_delete_mapping_range(struct address_space *mapping,
                loff_t start, loff_t end);

include/linux/device-mapper.h (+1 -1)
···
  */
 typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode node, void **kaddr,
-               pfn_t *pfn);
+               unsigned long *pfn);
 typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
                size_t nr_pages);

include/linux/huge_mm.h (+4 -2)
···
                pmd_t *pmd, unsigned long addr, pgprot_t newprot,
                unsigned long cp_flags);

-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+                             bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+                             bool write);
 vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
                                bool write);
 vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,

include/linux/mm.h (+2 -2)
···
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-                       pfn_t pfn);
+                       unsigned long pfn);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
-                       unsigned long addr, pfn_t pfn);
+                       unsigned long addr, unsigned long pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,

include/linux/pfn.h (-9)
···
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
-
-/*
- * pfn_t: encapsulates a page-frame number that is optionally backed
- * by memmap (struct page). Whether a pfn_t has a 'struct page'
- * backing is indicated by flags in the high bits of the value.
- */
-typedef struct {
-       u64 val;
-} pfn_t;
 #endif

 #define PFN_ALIGN(x)   (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)

include/linux/pfn_t.h (-85, file deleted)
···
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PFN_T_H_
-#define _LINUX_PFN_T_H_
-#include <linux/mm.h>
-
-/*
- * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags
- */
-#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
-
-#define PFN_FLAGS_TRACE \
-       { }
-
-static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
-{
-       pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
-
-       return pfn_t;
-}
-
-/* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */
-static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
-{
-       return __pfn_to_pfn_t(pfn, 0);
-}
-
-static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
-{
-       return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
-}
-
-static inline bool pfn_t_has_page(pfn_t pfn)
-{
-       return true;
-}
-
-static inline unsigned long pfn_t_to_pfn(pfn_t pfn)
-{
-       return pfn.val & ~PFN_FLAGS_MASK;
-}
-
-static inline struct page *pfn_t_to_page(pfn_t pfn)
-{
-       if (pfn_t_has_page(pfn))
-               return pfn_to_page(pfn_t_to_pfn(pfn));
-       return NULL;
-}
-
-static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
-{
-       return PFN_PHYS(pfn_t_to_pfn(pfn));
-}
-
-static inline pfn_t page_to_pfn_t(struct page *page)
-{
-       return pfn_to_pfn_t(page_to_pfn(page));
-}
-
-static inline int pfn_t_valid(pfn_t pfn)
-{
-       return pfn_valid(pfn_t_to_pfn(pfn));
-}
-
-#ifdef CONFIG_MMU
-static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
-{
-       return pfn_pte(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
-{
-       return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
-{
-       return pfn_pud(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-#endif
-
-#endif /* _LINUX_PFN_T_H_ */
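
With the header deleted, every pfn_t helper has a direct plain-pfn replacement, which is what the converted call sites above use. An informal mapping (the right-hand helpers are long-standing kernel APIs; pfn_t_has_page() always returned true, which is why dm-writecache now checks pfn_valid() instead):

        /* __pfn_to_pfn_t(pfn, 0)     ->  pfn                          */
        /* phys_to_pfn_t(phys, 0)     ->  PHYS_PFN(phys)               */
        /* pfn_t_to_pfn(pfn)          ->  pfn                          */
        /* pfn_t_to_page(pfn)         ->  pfn_to_page(pfn)             */
        /* page_to_pfn_t(page)        ->  page_to_pfn(page)            */
        /* pfn_t_valid(pfn)           ->  pfn_valid(pfn)               */
        /* pfn_t_pte/pmd/pud(p, prot) ->  pfn_pte/pfn_pmd/pfn_pud(p, prot) */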

mm/debug_vm_pgtable.c (-1)
···
 #include <linux/mman.h>
 #include <linux/mm_types.h>
 #include <linux/module.h>
-#include <linux/pfn_t.h>
 #include <linux/printk.h>
 #include <linux/pgtable.h>
 #include <linux/random.h>

mm/huge_memory.c (+11 -10)
···
 #include <linux/mm_types.h>
 #include <linux/khugepaged.h>
 #include <linux/freezer.h>
-#include <linux/pfn_t.h>
 #include <linux/mman.h>
 #include <linux/memremap.h>
 #include <linux/pagemap.h>
···
 struct folio_or_pfn {
        union {
                struct folio *folio;
-               pfn_t pfn;
+               unsigned long pfn;
        };
        bool is_folio;
 };
···
        if (!pmd_none(*pmd)) {
                const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
-                                         pfn_t_to_pfn(fop.pfn);
+                                         fop.pfn;

                if (write) {
                        if (pmd_pfn(*pmd) != pfn) {
···
                folio_add_file_rmap_pmd(fop.folio, &fop.folio->page, vma);
                add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR);
        } else {
-               entry = pmd_mkhuge(pfn_t_pmd(fop.pfn, prot));
+               entry = pmd_mkhuge(pfn_pmd(fop.pfn, prot));
                entry = pmd_mkspecial(entry);
        }
        if (write) {
···
  *
  * Return: vm_fault_t value.
  */
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+                             bool write)
 {
        unsigned long addr = vmf->address & PMD_MASK;
        struct vm_area_struct *vma = vmf->vma;
···
                return VM_FAULT_OOM;
        }

-       pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
+       pfnmap_setup_cachemode_pfn(pfn, &pgprot);

        ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        error = insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write,
···
        if (!pud_none(*pud)) {
                const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
-                                         pfn_t_to_pfn(fop.pfn);
+                                         fop.pfn;

                if (write) {
                        if (WARN_ON_ONCE(pud_pfn(*pud) != pfn))
···
                folio_add_file_rmap_pud(fop.folio, &fop.folio->page, vma);
                add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PUD_NR);
        } else {
-               entry = pud_mkhuge(pfn_t_pud(fop.pfn, prot));
+               entry = pud_mkhuge(pfn_pud(fop.pfn, prot));
                entry = pud_mkspecial(entry);
        }
        if (write) {
···
  *
  * Return: vm_fault_t value.
  */
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+                             bool write)
 {
        unsigned long addr = vmf->address & PUD_MASK;
        struct vm_area_struct *vma = vmf->vma;
···
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;

-       pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
+       pfnmap_setup_cachemode_pfn(pfn, &pgprot);

        ptl = pud_lock(vma->vm_mm, vmf->pud);
        insert_pud(vma, addr, vmf->pud, fop, pgprot, write);

mm/memory.c (+15 -16)
···
 #include <linux/export.h>
 #include <linux/delayacct.h>
 #include <linux/init.h>
-#include <linux/pfn_t.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
···
 EXPORT_SYMBOL(vm_map_pages_zero);

 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-                       pfn_t pfn, pgprot_t prot, bool mkwrite)
+                       unsigned long pfn, pgprot_t prot, bool mkwrite)
 {
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, entry;
···
                 * allocation and mapping invalidation so just skip the
                 * update.
                 */
-               if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
+               if (pte_pfn(entry) != pfn) {
                        WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
                        goto out_unlock;
                }
···
        /* Ok, finally just insert the thing.. */
-       entry = pte_mkspecial(pfn_t_pte(pfn, prot));
+       entry = pte_mkspecial(pfn_pte(pfn, prot));

        if (mkwrite) {
                entry = pte_mkyoung(entry);
···
        pfnmap_setup_cachemode_pfn(pfn, &pgprot);

-       return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, 0), pgprot,
-                       false);
+       return insert_pfn(vma, addr, pfn, pgprot, false);
 }
 EXPORT_SYMBOL(vmf_insert_pfn_prot);
···
 }
 EXPORT_SYMBOL(vmf_insert_pfn);

-static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)
+static bool vm_mixed_ok(struct vm_area_struct *vma, unsigned long pfn,
+                       bool mkwrite)
 {
-       if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) &&
+       if (unlikely(is_zero_pfn(pfn)) &&
            (mkwrite || !vm_mixed_zeropage_allowed(vma)))
                return false;
        /* these checks mirror the abort conditions in vm_normal_page */
        if (vma->vm_flags & VM_MIXEDMAP)
                return true;
-       if (is_zero_pfn(pfn_t_to_pfn(pfn)))
+       if (is_zero_pfn(pfn))
                return true;
        return false;
 }

 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
-               unsigned long addr, pfn_t pfn, bool mkwrite)
+               unsigned long addr, unsigned long pfn, bool mkwrite)
 {
        pgprot_t pgprot = vma->vm_page_prot;
        int err;
···
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;

-       pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
+       pfnmap_setup_cachemode_pfn(pfn, &pgprot);

-       if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
+       if (!pfn_modify_allowed(pfn, pgprot))
                return VM_FAULT_SIGBUS;

        /*
···
         * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
         * without pte special, it would there be refcounted as a normal page.
         */
-       if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_t_valid(pfn)) {
+       if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_valid(pfn)) {
                struct page *page;

                /*
                 * regardless of whether the caller specified flags that
                 * result in pfn_t_has_page() == false.
                 */
-               page = pfn_to_page(pfn_t_to_pfn(pfn));
+               page = pfn_to_page(pfn);
                err = insert_page(vma, addr, page, pgprot, mkwrite);
        } else {
                return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
···
 EXPORT_SYMBOL_GPL(vmf_insert_page_mkwrite);

 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
-               pfn_t pfn)
+               unsigned long pfn)
 {
        return __vm_insert_mixed(vma, addr, pfn, false);
 }
···
  * the same entry was actually inserted.
  */
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
-               unsigned long addr, pfn_t pfn)
+               unsigned long addr, unsigned long pfn)
 {
        return __vm_insert_mixed(vma, addr, pfn, true);
 }

mm/memremap.c (-1)
···
 #include <linux/kasan.h>
 #include <linux/memory_hotplug.h>
 #include <linux/memremap.h>
-#include <linux/pfn_t.h>
 #include <linux/swap.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>

mm/migrate.c (-1)
···
 #include <linux/compat.h>
 #include <linux/hugetlb.h>
 #include <linux/gfp.h>
-#include <linux/pfn_t.h>
 #include <linux/page_idle.h>
 #include <linux/page_owner.h>
 #include <linux/sched/mm.h>

tools/testing/nvdimm/pmem-dax.c (+3 -3)
···
 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode mode, void **kaddr,
-               pfn_t *pfn)
+               unsigned long *pfn)
 {
        resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
···
                        *kaddr = pmem->virt_addr + offset;
                page = vmalloc_to_page(pmem->virt_addr + offset);
                if (pfn)
-                       *pfn = page_to_pfn_t(page);
+                       *pfn = page_to_pfn(page);
                pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
                                __func__, pmem, pgoff, page_to_pfn(page));
···
        if (kaddr)
                *kaddr = pmem->virt_addr + offset;
        if (pfn)
-               *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+               *pfn = PHYS_PFN(pmem->phys_addr + offset);

        /*
         * If badblocks are present, limit known good range to the

tools/testing/nvdimm/test/iomap.c (-7)
···
 #include <linux/ioport.h>
 #include <linux/module.h>
 #include <linux/types.h>
-#include <linux/pfn_t.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 #include <linux/mm.h>
···
        return nfit_res->buf + offset - nfit_res->res.start;
 }
 EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
-
-pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
-{
-       return phys_to_pfn_t(addr, flags);
-}
-EXPORT_SYMBOL(__wrap_phys_to_pfn_t);

 void *__wrap_memremap(resource_size_t offset, size_t size,
                unsigned long flags)

tools/testing/nvdimm/test/nfit_test.h (-1)
···
 void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags);
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
-pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
 void *__wrap_memremap(resource_size_t offset, size_t size,
                unsigned long flags);
 void __wrap_devm_memunmap(struct device *dev, void *addr);