Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

mm: update all remaining mmap_prepare users to use vma_flags_t

We will shortly be removing the vm_flags_t field from vm_area_desc, so we
need to update all mmap_prepare users to use only the desc->vma_flags
field.

This patch achieves that and makes all ancillary changes required to make
this possible.

This lays the groundwork for future work to eliminate the use of
vm_flags_t in vm_area_desc altogether and more broadly throughout the
kernel.

While we're here, we take the opportunity to replace VM_REMAP_FLAGS with
VMA_REMAP_FLAGS, the vma_flags_t equivalent.

No functional changes intended.

Link: https://lkml.kernel.org/r/fb1f55323799f09fe6a36865b31550c9ec67c225.1769097829.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: Damien Le Moal <dlemoal@kernel.org> [zonefs]
Acked-by: "Darrick J. Wong" <djwong@kernel.org>
Acked-by: Pedro Falcato <pfalcato@suse.de>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Yury Norov <ynorov@nvidia.com>
Cc: Chris Mason <clm@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Lorenzo Stoakes and committed by
Andrew Morton
5bd2c065 590d356a

+56 -41
+3 -3
drivers/char/mem.c
··· 306 306 /* can't do an in-place private mapping if there's no MMU */ 307 307 static inline int private_mapping_ok(struct vm_area_desc *desc) 308 308 { 309 - return is_nommu_shared_mapping(desc->vm_flags); 309 + return is_nommu_shared_vma_flags(&desc->vma_flags); 310 310 } 311 311 #else 312 312 ··· 360 360 361 361 desc->vm_ops = &mmap_mem_ops; 362 362 363 - /* Remap-pfn-range will mark the range VM_IO. */ 363 + /* Remap-pfn-range will mark the range with the I/O flag. */ 364 364 mmap_action_remap_full(desc, desc->pgoff); 365 365 /* We filter remap errors to -EAGAIN. */ 366 366 desc->action.error_hook = mmap_filter_error; ··· 520 520 #ifndef CONFIG_MMU 521 521 return -ENOSYS; 522 522 #endif 523 - if (desc->vm_flags & VM_SHARED) 523 + if (vma_desc_test_flags(desc, VMA_SHARED_BIT)) 524 524 return shmem_zero_setup_desc(desc); 525 525 526 526 desc->action.success_hook = mmap_zero_private_success;
+5 -5
drivers/dax/device.c
··· 13 13 #include "dax-private.h" 14 14 #include "bus.h" 15 15 16 - static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags, 16 + static int __check_vma(struct dev_dax *dev_dax, vma_flags_t flags, 17 17 unsigned long start, unsigned long end, struct file *file, 18 18 const char *func) 19 19 { ··· 24 24 return -ENXIO; 25 25 26 26 /* prevent private mappings from being established */ 27 - if ((vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { 27 + if (!vma_flags_test(&flags, VMA_MAYSHARE_BIT)) { 28 28 dev_info_ratelimited(dev, 29 29 "%s: %s: fail, attempted private mapping\n", 30 30 current->comm, func); ··· 53 53 static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, 54 54 const char *func) 55 55 { 56 - return __check_vma(dev_dax, vma->vm_flags, vma->vm_start, vma->vm_end, 56 + return __check_vma(dev_dax, vma->flags, vma->vm_start, vma->vm_end, 57 57 vma->vm_file, func); 58 58 } 59 59 ··· 306 306 * fault time. 307 307 */ 308 308 id = dax_read_lock(); 309 - rc = __check_vma(dev_dax, desc->vm_flags, desc->start, desc->end, filp, 309 + rc = __check_vma(dev_dax, desc->vma_flags, desc->start, desc->end, filp, 310 310 __func__); 311 311 dax_read_unlock(id); 312 312 if (rc) 313 313 return rc; 314 314 315 315 desc->vm_ops = &dax_vm_ops; 316 - desc->vm_flags |= VM_HUGEPAGE; 316 + vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT); 317 317 return 0; 318 318 } 319 319
+1 -1
fs/aio.c
··· 394 394 395 395 static int aio_ring_mmap_prepare(struct vm_area_desc *desc) 396 396 { 397 - desc->vm_flags |= VM_DONTEXPAND; 397 + vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT); 398 398 desc->vm_ops = &aio_ring_vm_ops; 399 399 return 0; 400 400 }
+3 -2
fs/erofs/data.c
··· 438 438 if (!IS_DAX(file_inode(desc->file))) 439 439 return generic_file_readonly_mmap_prepare(desc); 440 440 441 - if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE)) 441 + if (vma_desc_test_flags(desc, VMA_SHARED_BIT) && 442 + vma_desc_test_flags(desc, VMA_MAYWRITE_BIT)) 442 443 return -EINVAL; 443 444 444 445 desc->vm_ops = &erofs_dax_vm_ops; 445 - desc->vm_flags |= VM_HUGEPAGE; 446 + vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT); 446 447 return 0; 447 448 } 448 449 #else
+2 -2
fs/ext4/file.c
··· 822 822 * We don't support synchronous mappings for non-DAX files and 823 823 * for DAX files if underneath dax_device is not synchronous. 824 824 */ 825 - if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), dax_dev)) 825 + if (!daxdev_mapping_supported(desc, file_inode(file), dax_dev)) 826 826 return -EOPNOTSUPP; 827 827 828 828 file_accessed(file); 829 829 if (IS_DAX(file_inode(file))) { 830 830 desc->vm_ops = &ext4_dax_vm_ops; 831 - desc->vm_flags |= VM_HUGEPAGE; 831 + vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT); 832 832 } else { 833 833 desc->vm_ops = &ext4_file_vm_ops; 834 834 }
+1 -1
fs/ntfs3/file.c
··· 347 347 struct inode *inode = file_inode(file); 348 348 struct ntfs_inode *ni = ntfs_i(inode); 349 349 u64 from = ((u64)desc->pgoff << PAGE_SHIFT); 350 - bool rw = desc->vm_flags & VM_WRITE; 350 + const bool rw = vma_desc_test_flags(desc, VMA_WRITE_BIT); 351 351 int err; 352 352 353 353 /* Avoid any operation if inode is bad. */
+2 -2
fs/orangefs/file.c
··· 411 411 "orangefs_file_mmap: called on %pD\n", file); 412 412 413 413 /* set the sequential readahead hint */ 414 - desc->vm_flags |= VM_SEQ_READ; 415 - desc->vm_flags &= ~VM_RAND_READ; 414 + vma_desc_set_flags(desc, VMA_SEQ_READ_BIT); 415 + vma_desc_clear_flags(desc, VMA_RAND_READ_BIT); 416 416 417 417 file_accessed(file); 418 418 desc->vm_ops = &orangefs_file_vm_ops;
+1 -1
fs/ramfs/file-nommu.c
··· 264 264 */ 265 265 static int ramfs_nommu_mmap_prepare(struct vm_area_desc *desc) 266 266 { 267 - if (!is_nommu_shared_mapping(desc->vm_flags)) 267 + if (!is_nommu_shared_vma_flags(&desc->vma_flags)) 268 268 return -ENOSYS; 269 269 270 270 file_accessed(desc->file);
+1 -1
fs/resctrl/pseudo_lock.c
··· 1044 1044 * Ensure changes are carried directly to the memory being mapped, 1045 1045 * do not allow copy-on-write mapping. 1046 1046 */ 1047 - if (!(desc->vm_flags & VM_SHARED)) { 1047 + if (!vma_desc_test_flags(desc, VMA_SHARED_BIT)) { 1048 1048 mutex_unlock(&rdtgroup_mutex); 1049 1049 return -EINVAL; 1050 1050 }
+1 -1
fs/romfs/mmap-nommu.c
··· 63 63 */ 64 64 static int romfs_mmap_prepare(struct vm_area_desc *desc) 65 65 { 66 - return is_nommu_shared_mapping(desc->vm_flags) ? 0 : -ENOSYS; 66 + return is_nommu_shared_vma_flags(&desc->vma_flags) ? 0 : -ENOSYS; 67 67 } 68 68 69 69 static unsigned romfs_mmap_capabilities(struct file *file)
+2 -2
fs/xfs/xfs_file.c
··· 1974 1974 * We don't support synchronous mappings for non-DAX files and 1975 1975 * for DAX files if underneath dax_device is not synchronous. 1976 1976 */ 1977 - if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), 1977 + if (!daxdev_mapping_supported(desc, file_inode(file), 1978 1978 target->bt_daxdev)) 1979 1979 return -EOPNOTSUPP; 1980 1980 1981 1981 file_accessed(file); 1982 1982 desc->vm_ops = &xfs_file_vm_ops; 1983 1983 if (IS_DAX(inode)) 1984 - desc->vm_flags |= VM_HUGEPAGE; 1984 + vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT); 1985 1985 return 0; 1986 1986 } 1987 1987
+2 -1
fs/zonefs/file.c
··· 333 333 * ordering between msync() and page cache writeback. 334 334 */ 335 335 if (zonefs_inode_is_seq(file_inode(file)) && 336 - (desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE)) 336 + vma_desc_test_flags(desc, VMA_SHARED_BIT) && 337 + vma_desc_test_flags(desc, VMA_MAYWRITE_BIT)) 337 338 return -EINVAL; 338 339 339 340 file_accessed(file);
+4 -4
include/linux/dax.h
··· 65 65 /* 66 66 * Check if given mapping is supported by the file / underlying device. 67 67 */ 68 - static inline bool daxdev_mapping_supported(vm_flags_t vm_flags, 68 + static inline bool daxdev_mapping_supported(const struct vm_area_desc *desc, 69 69 const struct inode *inode, 70 70 struct dax_device *dax_dev) 71 71 { 72 - if (!(vm_flags & VM_SYNC)) 72 + if (!vma_desc_test_flags(desc, VMA_SYNC_BIT)) 73 73 return true; 74 74 if (!IS_DAX(inode)) 75 75 return false; ··· 111 111 static inline void set_dax_synchronous(struct dax_device *dax_dev) 112 112 { 113 113 } 114 - static inline bool daxdev_mapping_supported(vm_flags_t vm_flags, 114 + static inline bool daxdev_mapping_supported(const struct vm_area_desc *desc, 115 115 const struct inode *inode, 116 116 struct dax_device *dax_dev) 117 117 { 118 - return !(vm_flags & VM_SYNC); 118 + return !vma_desc_test_flags(desc, VMA_SYNC_BIT); 119 119 } 120 120 static inline size_t dax_recovery_write(struct dax_device *dax_dev, 121 121 pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+19 -5
include/linux/mm.h
··· 550 550 /* 551 551 * Physically remapped pages are special. Tell the 552 552 * rest of the world about it: 553 - * VM_IO tells people not to look at these pages 553 + * IO tells people not to look at these pages 554 554 * (accesses can have side effects). 555 - * VM_PFNMAP tells the core MM that the base pages are just 555 + * PFNMAP tells the core MM that the base pages are just 556 556 * raw PFN mappings, and do not have a "struct page" associated 557 557 * with them. 558 - * VM_DONTEXPAND 558 + * DONTEXPAND 559 559 * Disable vma merging and expanding with mremap(). 560 - * VM_DONTDUMP 560 + * DONTDUMP 561 561 * Omit vma from core dump, even when VM_IO turned off. 562 562 */ 563 - #define VM_REMAP_FLAGS (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) 563 + #define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT, \ 564 + VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT) 564 565 565 566 /* This mask prevents VMA from being scanned with khugepaged */ 566 567 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) ··· 1926 1925 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 1927 1926 } 1928 1927 1928 + static inline bool vma_desc_is_cow_mapping(struct vm_area_desc *desc) 1929 + { 1930 + const vma_flags_t *flags = &desc->vma_flags; 1931 + 1932 + return vma_flags_test(flags, VMA_MAYWRITE_BIT) && 1933 + !vma_flags_test(flags, VMA_SHARED_BIT); 1934 + } 1935 + 1929 1936 #ifndef CONFIG_MMU 1930 1937 static inline bool is_nommu_shared_mapping(vm_flags_t flags) 1931 1938 { ··· 1946 1937 * write permissions later. 1947 1938 */ 1948 1939 return flags & (VM_MAYSHARE | VM_MAYOVERLAY); 1940 + } 1941 + 1942 + static inline bool is_nommu_shared_vma_flags(const vma_flags_t *flags) 1943 + { 1944 + return vma_flags_test(flags, VMA_MAYSHARE_BIT, VMA_MAYOVERLAY_BIT); 1949 1945 } 1950 1946 #endif 1951 1947
+1 -1
kernel/relay.c
··· 92 92 return -EINVAL; 93 93 94 94 desc->vm_ops = &relay_file_mmap_ops; 95 - desc->vm_flags |= VM_DONTEXPAND; 95 + vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT); 96 96 desc->private_data = buf; 97 97 98 98 return 0;
+8 -9
mm/memory.c
··· 2957 2957 return 0; 2958 2958 } 2959 2959 2960 - static int get_remap_pgoff(vm_flags_t vm_flags, unsigned long addr, 2960 + static int get_remap_pgoff(bool is_cow, unsigned long addr, 2961 2961 unsigned long end, unsigned long vm_start, unsigned long vm_end, 2962 2962 unsigned long pfn, pgoff_t *vm_pgoff_p) 2963 2963 { ··· 2967 2967 * un-COW'ed pages by matching them up with "vma->vm_pgoff". 2968 2968 * See vm_normal_page() for details. 2969 2969 */ 2970 - if (is_cow_mapping(vm_flags)) { 2970 + if (is_cow) { 2971 2971 if (addr != vm_start || end != vm_end) 2972 2972 return -EINVAL; 2973 2973 *vm_pgoff_p = pfn; ··· 2988 2988 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr))) 2989 2989 return -EINVAL; 2990 2990 2991 - VM_WARN_ON_ONCE((vma->vm_flags & VM_REMAP_FLAGS) != VM_REMAP_FLAGS); 2991 + VM_WARN_ON_ONCE(!vma_test_all_flags_mask(vma, VMA_REMAP_FLAGS)); 2992 2992 2993 2993 BUG_ON(addr >= end); 2994 2994 pfn -= addr >> PAGE_SHIFT; ··· 3112 3112 * check it again on complete and will fail there if specified addr is 3113 3113 * invalid. 3114 3114 */ 3115 - get_remap_pgoff(desc->vm_flags, desc->start, desc->end, 3115 + get_remap_pgoff(vma_desc_is_cow_mapping(desc), desc->start, desc->end, 3116 3116 desc->start, desc->end, pfn, &desc->pgoff); 3117 - desc->vm_flags |= VM_REMAP_FLAGS; 3117 + vma_desc_set_flags_mask(desc, VMA_REMAP_FLAGS); 3118 3118 } 3119 3119 3120 3120 static int remap_pfn_range_prepare_vma(struct vm_area_struct *vma, unsigned long addr, ··· 3123 3123 unsigned long end = addr + PAGE_ALIGN(size); 3124 3124 int err; 3125 3125 3126 - err = get_remap_pgoff(vma->vm_flags, addr, end, 3127 - vma->vm_start, vma->vm_end, 3128 - pfn, &vma->vm_pgoff); 3126 + err = get_remap_pgoff(is_cow_mapping(vma->vm_flags), addr, end, 3127 + vma->vm_start, vma->vm_end, pfn, &vma->vm_pgoff); 3129 3128 if (err) 3130 3129 return err; 3131 3130 3132 - vm_flags_set(vma, VM_REMAP_FLAGS); 3131 + vma_set_flags_mask(vma, VMA_REMAP_FLAGS); 3133 3132 return 0; 3134 3133 } 3135 3134