Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

mm: convert do_brk_flags() to use vma_flags_t

In order to be able to do this, we need to change VM_DATA_DEFAULT_FLAGS
and friends and update the architecture-specific definitions also.

We then have to update some KSM logic to handle VMA flags, and introduce
VMA_STACK_FLAGS to define the vma_flags_t equivalent of VM_STACK_FLAGS.

We also introduce two helper functions for use during the time we are
converting legacy flags to vma_flags_t values - vma_flags_to_legacy() and
legacy_to_vma_flags().

This enables us to make changes iteratively, breaking this conversion up into
separate parts.

We use these explicitly here to keep VM_STACK_FLAGS around for certain
users which need to maintain the legacy vm_flags_t values for the time
being.

We are no longer able to rely on the simple VM_xxx being set to zero if
the feature is not enabled, so in the case of VM_DROPPABLE we introduce
VMA_DROPPABLE as the vma_flags_t equivalent, which is set to
EMPTY_VMA_FLAGS if the droppable flag is not available.

While we're here, we make the description of do_brk_flags() into a kdoc
comment, as it almost was already.

We use vma_flags_to_legacy() so that we do not need to update the
vm_get_page_prot() logic at this time.

Note that in create_init_stack_vma() we have to replace the BUILD_BUG_ON()
with a VM_WARN_ON_ONCE() as the tested values are no longer build time
available.

We also update mprotect_fixup() to use VMA flags where possible, though we
have to live with a little duplication between vm_flags_t and vma_flags_t
values for the time being until further conversions are made.

While we're here, update VM_SPECIAL to be defined in terms of
VMA_SPECIAL_FLAGS now we have vma_flags_to_legacy().

Finally, we update the VMA tests to reflect these changes.

Link: https://lkml.kernel.org/r/d02e3e45d9a33d7904b149f5604904089fd640ae.1774034900.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Acked-by: Paul Moore <paul@paul-moore.com> [SELinux]
Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Kees Cook <kees@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stephen Smalley <stephen.smalley.work@gmail.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Lorenzo Stoakes (Oracle) and committed by
Andrew Morton
3a6455d5 bbbc17cb

+189 -138
+1 -1
arch/arc/include/asm/page.h
··· 131 131 #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) 132 132 133 133 /* Default Permissions for stack/heaps pages (Non Executable) */ 134 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC 134 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_NON_EXEC 135 135 136 136 #define WANT_PAGE_VIRTUAL 1 137 137
+1 -1
arch/arm/include/asm/page.h
··· 184 184 185 185 #include <asm/memory.h> 186 186 187 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC 187 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC 188 188 189 189 #include <asm-generic/getorder.h> 190 190 #include <asm-generic/memory_model.h>
+6 -1
arch/arm64/include/asm/page.h
··· 46 46 47 47 #endif /* !__ASSEMBLER__ */ 48 48 49 - #define VM_DATA_DEFAULT_FLAGS (VM_DATA_FLAGS_TSK_EXEC | VM_MTE_ALLOWED) 49 + #ifdef CONFIG_ARM64_MTE 50 + #define VMA_DATA_DEFAULT_FLAGS append_vma_flags(VMA_DATA_FLAGS_TSK_EXEC, \ 51 + VMA_MTE_ALLOWED_BIT) 52 + #else 53 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC 54 + #endif 50 55 51 56 #include <asm-generic/getorder.h> 52 57
+1 -1
arch/hexagon/include/asm/page.h
··· 90 90 #define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr))) 91 91 92 92 /* Default vm area behavior is non-executable. */ 93 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC 93 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_NON_EXEC 94 94 95 95 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 96 96
+1 -1
arch/loongarch/include/asm/page.h
··· 104 104 extern int __virt_addr_valid(volatile void *kaddr); 105 105 #define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr)) 106 106 107 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC 107 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC 108 108 109 109 #include <asm-generic/memory_model.h> 110 110 #include <asm-generic/getorder.h>
+1 -1
arch/mips/include/asm/page.h
··· 213 213 #define virt_addr_valid(kaddr) \ 214 214 __virt_addr_valid((const volatile void *) (kaddr)) 215 215 216 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC 216 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC 217 217 218 218 extern unsigned long __kaslr_offset; 219 219 static inline unsigned long kaslr_offset(void)
+1 -1
arch/nios2/include/asm/page.h
··· 85 85 # define virt_to_page(vaddr) pfn_to_page(PFN_DOWN(virt_to_phys(vaddr))) 86 86 # define virt_addr_valid(vaddr) pfn_valid(PFN_DOWN(virt_to_phys(vaddr))) 87 87 88 - # define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC 88 + # define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_NON_EXEC 89 89 90 90 #include <asm-generic/memory_model.h> 91 91
+2 -2
arch/powerpc/include/asm/page.h
··· 240 240 * and needs to be executable. This means the whole heap ends 241 241 * up being executable. 242 242 */ 243 - #define VM_DATA_DEFAULT_FLAGS32 VM_DATA_FLAGS_TSK_EXEC 244 - #define VM_DATA_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC 243 + #define VMA_DATA_DEFAULT_FLAGS32 VMA_DATA_FLAGS_TSK_EXEC 244 + #define VMA_DATA_DEFAULT_FLAGS64 VMA_DATA_FLAGS_NON_EXEC 245 245 246 246 #ifdef __powerpc64__ 247 247 #include <asm/page_64.h>
+1 -1
arch/powerpc/include/asm/page_32.h
··· 10 10 #endif 11 11 #endif 12 12 13 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32 13 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS32 14 14 15 15 #if defined(CONFIG_PPC_256K_PAGES) || \ 16 16 (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
+6 -6
arch/powerpc/include/asm/page_64.h
··· 84 84 85 85 #endif /* __ASSEMBLER__ */ 86 86 87 - #define VM_DATA_DEFAULT_FLAGS \ 87 + #define VMA_DATA_DEFAULT_FLAGS \ 88 88 (is_32bit_task() ? \ 89 - VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64) 89 + VMA_DATA_DEFAULT_FLAGS32 : VMA_DATA_DEFAULT_FLAGS64) 90 90 91 91 /* 92 92 * This is the default if a program doesn't have a PT_GNU_STACK ··· 94 94 * stack by default, so in the absence of a PT_GNU_STACK program header 95 95 * we turn execute permission off. 96 96 */ 97 - #define VM_STACK_DEFAULT_FLAGS32 VM_DATA_FLAGS_EXEC 98 - #define VM_STACK_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC 97 + #define VMA_STACK_DEFAULT_FLAGS32 VMA_DATA_FLAGS_EXEC 98 + #define VMA_STACK_DEFAULT_FLAGS64 VMA_DATA_FLAGS_NON_EXEC 99 99 100 - #define VM_STACK_DEFAULT_FLAGS \ 100 + #define VMA_STACK_DEFAULT_FLAGS \ 101 101 (is_32bit_task() ? \ 102 - VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) 102 + VMA_STACK_DEFAULT_FLAGS32 : VMA_STACK_DEFAULT_FLAGS64) 103 103 104 104 #include <asm-generic/getorder.h> 105 105
+1 -1
arch/riscv/include/asm/page.h
··· 204 204 (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \ 205 205 }) 206 206 207 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC 207 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_NON_EXEC 208 208 209 209 #include <asm-generic/memory_model.h> 210 210 #include <asm-generic/getorder.h>
+1 -1
arch/s390/include/asm/page.h
··· 277 277 278 278 #define virt_addr_valid(kaddr) pfn_valid(phys_to_pfn(__pa_nodebug((unsigned long)(kaddr)))) 279 279 280 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC 280 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_NON_EXEC 281 281 282 282 #endif /* !__ASSEMBLER__ */ 283 283
+1 -1
arch/x86/include/asm/page_types.h
··· 26 26 27 27 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) 28 28 29 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC 29 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC 30 30 31 31 /* Physical address where kernel should be loaded. */ 32 32 #define LOAD_PHYSICAL_ADDR __ALIGN_KERNEL_MASK(CONFIG_PHYSICAL_START, CONFIG_PHYSICAL_ALIGN - 1)
+2 -2
arch/x86/um/asm/vm-flags.h
··· 9 9 10 10 #ifdef CONFIG_X86_32 11 11 12 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC 12 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC 13 13 14 14 #else 15 15 16 - #define VM_STACK_DEFAULT_FLAGS (VM_GROWSDOWN | VM_DATA_FLAGS_EXEC) 16 + #define VMA_STACK_DEFAULT_FLAGS append_vma_flags(VMA_DATA_FLAGS_EXEC, VMA_GROWSDOWN_BIT) 17 17 18 18 #endif 19 19 #endif
+5 -5
include/linux/ksm.h
··· 17 17 #ifdef CONFIG_KSM 18 18 int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 19 19 unsigned long end, int advice, vm_flags_t *vm_flags); 20 - vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file, 21 - vm_flags_t vm_flags); 20 + vma_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file, 21 + vma_flags_t vma_flags); 22 22 int ksm_enable_merge_any(struct mm_struct *mm); 23 23 int ksm_disable_merge_any(struct mm_struct *mm); 24 24 int ksm_disable(struct mm_struct *mm); ··· 103 103 104 104 #else /* !CONFIG_KSM */ 105 105 106 - static inline vm_flags_t ksm_vma_flags(struct mm_struct *mm, 107 - const struct file *file, vm_flags_t vm_flags) 106 + static inline vma_flags_t ksm_vma_flags(struct mm_struct *mm, 107 + const struct file *file, vma_flags_t vma_flags) 108 108 { 109 - return vm_flags; 109 + return vma_flags; 110 110 } 111 111 112 112 static inline int ksm_disable(struct mm_struct *mm)
+29 -18
include/linux/mm.h
··· 346 346 * if KVM does not lock down the memory type. 347 347 */ 348 348 DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39), 349 - #ifdef CONFIG_PPC32 349 + #if defined(CONFIG_PPC32) 350 350 DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1), 351 - #else 351 + #elif defined(CONFIG_64BIT) 352 352 DECLARE_VMA_BIT(DROPPABLE, 40), 353 353 #endif 354 354 DECLARE_VMA_BIT(UFFD_MINOR, 41), ··· 503 503 #endif 504 504 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32) 505 505 #define VM_DROPPABLE INIT_VM_FLAG(DROPPABLE) 506 + #define VMA_DROPPABLE mk_vma_flags(VMA_DROPPABLE_BIT) 506 507 #else 507 508 #define VM_DROPPABLE VM_NONE 509 + #define VMA_DROPPABLE EMPTY_VMA_FLAGS 508 510 #endif 509 511 510 512 /* Bits set in the VMA until the stack is in its final location */ 511 513 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY) 512 514 513 - #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) 515 + #define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? 
\ 516 + VMA_EXEC_BIT : VMA_READ_BIT) 514 517 515 518 /* Common data flag combinations */ 516 - #define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \ 517 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 518 - #define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \ 519 - VM_MAYWRITE | VM_MAYEXEC) 520 - #define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \ 521 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 519 + #define VMA_DATA_FLAGS_TSK_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \ 520 + TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \ 521 + VMA_MAYEXEC_BIT) 522 + #define VMA_DATA_FLAGS_NON_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \ 523 + VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT) 524 + #define VMA_DATA_FLAGS_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \ 525 + VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \ 526 + VMA_MAYEXEC_BIT) 522 527 523 - #ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */ 524 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC 528 + #ifndef VMA_DATA_DEFAULT_FLAGS /* arch can override this */ 529 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_EXEC 525 530 #endif 526 531 527 - #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ 528 - #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS 532 + #ifndef VMA_STACK_DEFAULT_FLAGS /* arch can override this */ 533 + #define VMA_STACK_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS 529 534 #endif 535 + 536 + #define VMA_STACK_FLAGS append_vma_flags(VMA_STACK_DEFAULT_FLAGS, \ 537 + VMA_STACK_BIT, VMA_ACCOUNT_BIT) 538 + 539 + /* Temporary until VMA flags conversion complete. 
*/ 540 + #define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS) 530 541 531 542 #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK) 532 543 ··· 547 536 #define VM_SEALED_SYSMAP VM_NONE 548 537 #endif 549 538 550 - #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) 551 - 552 539 /* VMA basic access permission flags */ 553 540 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) 554 541 #define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT) ··· 554 545 /* 555 546 * Special vmas that are non-mergable, non-mlock()able. 556 547 */ 557 - #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) 548 + 549 + #define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \ 550 + VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT) 551 + #define VM_SPECIAL vma_flags_to_legacy(VMA_SPECIAL_FLAGS) 558 552 559 553 /* 560 554 * Physically remapped pages are special. Tell the ··· 1419 1407 * vm_area_desc object describing a proposed VMA, e.g.: 1420 1408 * 1421 1409 * vma_desc_set_flags(desc, VMA_IO_BIT, VMA_PFNMAP_BIT, VMA_DONTEXPAND_BIT, 1422 - * VMA_DONTDUMP_BIT); 1410 + * VMA_DONTDUMP_BIT); 1423 1411 */ 1424 1412 #define vma_desc_set_flags(desc, ...) \ 1425 1413 vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__)) ··· 4057 4045 extern struct file *get_mm_exe_file(struct mm_struct *mm); 4058 4046 extern struct file *get_task_exe_file(struct task_struct *task); 4059 4047 4060 - extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); 4061 4048 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); 4062 4049 4063 4050 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
+3
mm/internal.h
··· 1916 1916 return READ_ONCE(sysctl_max_map_count); 1917 1917 } 1918 1918 1919 + bool may_expand_vm(struct mm_struct *mm, const vma_flags_t *vma_flags, 1920 + unsigned long npages); 1921 + 1919 1922 #endif /* __MM_INTERNAL_H */
+23 -20
mm/ksm.c
··· 735 735 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; 736 736 } 737 737 738 - static bool ksm_compatible(const struct file *file, vm_flags_t vm_flags) 738 + static bool ksm_compatible(const struct file *file, vma_flags_t vma_flags) 739 739 { 740 - if (vm_flags & (VM_SHARED | VM_MAYSHARE | VM_SPECIAL | 741 - VM_HUGETLB | VM_DROPPABLE)) 742 - return false; /* just ignore the advice */ 743 - 740 + /* Just ignore the advice. */ 741 + if (vma_flags_test_any(&vma_flags, VMA_SHARED_BIT, VMA_MAYSHARE_BIT, 742 + VMA_HUGETLB_BIT)) 743 + return false; 744 + if (vma_flags_test_single_mask(&vma_flags, VMA_DROPPABLE)) 745 + return false; 746 + if (vma_flags_test_any_mask(&vma_flags, VMA_SPECIAL_FLAGS)) 747 + return false; 744 748 if (file_is_dax(file)) 745 749 return false; 746 - 747 750 #ifdef VM_SAO 748 - if (vm_flags & VM_SAO) 751 + if (vma_flags_test(&vma_flags, VMA_SAO_BIT)) 749 752 return false; 750 753 #endif 751 754 #ifdef VM_SPARC_ADI 752 - if (vm_flags & VM_SPARC_ADI) 755 + if (vma_flags_test(&vma_flags, VMA_SPARC_ADI_BIT)) 753 756 return false; 754 757 #endif 755 758 ··· 761 758 762 759 static bool vma_ksm_compatible(struct vm_area_struct *vma) 763 760 { 764 - return ksm_compatible(vma->vm_file, vma->vm_flags); 761 + return ksm_compatible(vma->vm_file, vma->flags); 765 762 } 766 763 767 764 static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm, ··· 2828 2825 return 0; 2829 2826 } 2830 2827 2831 - static bool __ksm_should_add_vma(const struct file *file, vm_flags_t vm_flags) 2828 + static bool __ksm_should_add_vma(const struct file *file, vma_flags_t vma_flags) 2832 2829 { 2833 - if (vm_flags & VM_MERGEABLE) 2830 + if (vma_flags_test(&vma_flags, VMA_MERGEABLE_BIT)) 2834 2831 return false; 2835 2832 2836 - return ksm_compatible(file, vm_flags); 2833 + return ksm_compatible(file, vma_flags); 2837 2834 } 2838 2835 2839 2836 static void __ksm_add_vma(struct vm_area_struct *vma) 2840 2837 { 2841 - if (__ksm_should_add_vma(vma->vm_file, vma->vm_flags)) 2838 
+ if (__ksm_should_add_vma(vma->vm_file, vma->flags)) 2842 2839 vm_flags_set(vma, VM_MERGEABLE); 2843 2840 } 2844 2841 ··· 2863 2860 * 2864 2861 * @mm: Proposed VMA's mm_struct 2865 2862 * @file: Proposed VMA's file-backed mapping, if any. 2866 - * @vm_flags: Proposed VMA"s flags. 2863 + * @vma_flags: Proposed VMA"s flags. 2867 2864 * 2868 - * Returns: @vm_flags possibly updated to mark mergeable. 2865 + * Returns: @vma_flags possibly updated to mark mergeable. 2869 2866 */ 2870 - vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file, 2871 - vm_flags_t vm_flags) 2867 + vma_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file, 2868 + vma_flags_t vma_flags) 2872 2869 { 2873 2870 if (mm_flags_test(MMF_VM_MERGE_ANY, mm) && 2874 - __ksm_should_add_vma(file, vm_flags)) { 2875 - vm_flags |= VM_MERGEABLE; 2871 + __ksm_should_add_vma(file, vma_flags)) { 2872 + vma_flags_set(&vma_flags, VMA_MERGEABLE_BIT); 2876 2873 /* 2877 2874 * Generally, the flags here always include MMF_VM_MERGEABLE. 2878 2875 * However, in rare cases, this flag may be cleared by ksmd who ··· 2882 2879 __ksm_enter(mm); 2883 2880 } 2884 2881 2885 - return vm_flags; 2882 + return vma_flags; 2886 2883 } 2887 2884 2888 2885 static void ksm_add_vmas(struct mm_struct *mm)
+8 -5
mm/mmap.c
··· 192 192 193 193 brkvma = vma_prev_limit(&vmi, mm->start_brk); 194 194 /* Ok, looks good - let it rip. */ 195 - if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0) 195 + if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 196 + EMPTY_VMA_FLAGS) < 0) 196 197 goto out; 197 198 198 199 mm->brk = brk; ··· 1204 1203 1205 1204 int vm_brk_flags(unsigned long addr, unsigned long request, bool is_exec) 1206 1205 { 1207 - const vm_flags_t vm_flags = is_exec ? VM_EXEC : 0; 1206 + const vma_flags_t vma_flags = is_exec ? 1207 + mk_vma_flags(VMA_EXEC_BIT) : EMPTY_VMA_FLAGS; 1208 1208 struct mm_struct *mm = current->mm; 1209 1209 struct vm_area_struct *vma = NULL; 1210 1210 unsigned long len; ··· 1232 1230 goto munmap_failed; 1233 1231 1234 1232 vma = vma_prev(&vmi); 1235 - ret = do_brk_flags(&vmi, vma, addr, len, vm_flags); 1233 + ret = do_brk_flags(&vmi, vma, addr, len, vma_flags); 1236 1234 populate = ((mm->def_flags & VM_LOCKED) != 0); 1237 1235 mmap_write_unlock(mm); 1238 1236 userfaultfd_unmap_complete(mm, &uf); ··· 1330 1328 * Return true if the calling process may expand its vm space by the passed 1331 1329 * number of pages 1332 1330 */ 1333 - bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) 1331 + bool may_expand_vm(struct mm_struct *mm, const vma_flags_t *vma_flags, 1332 + unsigned long npages) 1334 1333 { 1335 1334 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) 1336 1335 return false; 1337 1336 1338 - if (is_data_mapping(flags) && 1337 + if (is_data_mapping_vma_flags(vma_flags) && 1339 1338 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { 1340 1339 /* Workaround for Valgrind */ 1341 1340 if (rlimit(RLIMIT_DATA) == 0 &&
+28 -18
mm/mprotect.c
··· 697 697 unsigned long start, unsigned long end, vm_flags_t newflags) 698 698 { 699 699 struct mm_struct *mm = vma->vm_mm; 700 - vm_flags_t oldflags = READ_ONCE(vma->vm_flags); 700 + const vma_flags_t old_vma_flags = READ_ONCE(vma->flags); 701 + vma_flags_t new_vma_flags = legacy_to_vma_flags(newflags); 701 702 long nrpages = (end - start) >> PAGE_SHIFT; 702 703 unsigned int mm_cp_flags = 0; 703 704 unsigned long charged = 0; ··· 707 706 if (vma_is_sealed(vma)) 708 707 return -EPERM; 709 708 710 - if (newflags == oldflags) { 709 + if (vma_flags_same_pair(&old_vma_flags, &new_vma_flags)) { 711 710 *pprev = vma; 712 711 return 0; 713 712 } ··· 718 717 * uncommon case, so doesn't need to be very optimized. 719 718 */ 720 719 if (arch_has_pfn_modify_check() && 721 - (oldflags & (VM_PFNMAP|VM_MIXEDMAP)) && 722 - (newflags & VM_ACCESS_FLAGS) == 0) { 720 + vma_flags_test_any(&old_vma_flags, VMA_PFNMAP_BIT, 721 + VMA_MIXEDMAP_BIT) && 722 + !vma_flags_test_any_mask(&new_vma_flags, VMA_ACCESS_FLAGS)) { 723 723 pgprot_t new_pgprot = vm_get_page_prot(newflags); 724 724 725 725 error = walk_page_range(current->mm, start, end, ··· 738 736 * hugetlb mapping were accounted for even if read-only so there is 739 737 * no need to account for them here. 740 738 */ 741 - if (newflags & VM_WRITE) { 739 + if (vma_flags_test(&new_vma_flags, VMA_WRITE_BIT)) { 742 740 /* Check space limits when area turns into data. 
*/ 743 - if (!may_expand_vm(mm, newflags, nrpages) && 744 - may_expand_vm(mm, oldflags, nrpages)) 741 + if (!may_expand_vm(mm, &new_vma_flags, nrpages) && 742 + may_expand_vm(mm, &old_vma_flags, nrpages)) 745 743 return -ENOMEM; 746 - if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB| 747 - VM_SHARED|VM_NORESERVE))) { 744 + if (!vma_flags_test_any(&old_vma_flags, 745 + VMA_ACCOUNT_BIT, VMA_WRITE_BIT, VMA_HUGETLB_BIT, 746 + VMA_SHARED_BIT, VMA_NORESERVE_BIT)) { 748 747 charged = nrpages; 749 748 if (security_vm_enough_memory_mm(mm, charged)) 750 749 return -ENOMEM; 751 - newflags |= VM_ACCOUNT; 750 + vma_flags_set(&new_vma_flags, VMA_ACCOUNT_BIT); 752 751 } 753 - } else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) && 754 - !vma->anon_vma) { 755 - newflags &= ~VM_ACCOUNT; 752 + } else if (vma_flags_test(&old_vma_flags, VMA_ACCOUNT_BIT) && 753 + vma_is_anonymous(vma) && !vma->anon_vma) { 754 + vma_flags_clear(&new_vma_flags, VMA_ACCOUNT_BIT); 756 755 } 757 756 757 + newflags = vma_flags_to_legacy(new_vma_flags); 758 758 vma = vma_modify_flags(vmi, *pprev, vma, start, end, &newflags); 759 759 if (IS_ERR(vma)) { 760 760 error = PTR_ERR(vma); 761 761 goto fail; 762 762 } 763 + new_vma_flags = legacy_to_vma_flags(newflags); 763 764 764 765 *pprev = vma; 765 766 ··· 778 773 779 774 change_protection(tlb, vma, start, end, mm_cp_flags); 780 775 781 - if ((oldflags & VM_ACCOUNT) && !(newflags & VM_ACCOUNT)) 776 + if (vma_flags_test(&old_vma_flags, VMA_ACCOUNT_BIT) && 777 + !vma_flags_test(&new_vma_flags, VMA_ACCOUNT_BIT)) 782 778 vm_unacct_memory(nrpages); 783 779 784 780 /* 785 781 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major 786 782 * fault on access. 
787 783 */ 788 - if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED && 789 - (newflags & VM_WRITE)) { 790 - populate_vma_page_range(vma, start, end, NULL); 784 + if (vma_flags_test(&new_vma_flags, VMA_WRITE_BIT)) { 785 + const vma_flags_t mask = 786 + vma_flags_and(&old_vma_flags, VMA_WRITE_BIT, 787 + VMA_SHARED_BIT, VMA_LOCKED_BIT); 788 + 789 + if (vma_flags_same(&mask, VMA_LOCKED_BIT)) 790 + populate_vma_page_range(vma, start, end, NULL); 791 791 } 792 792 793 - vm_stat_account(mm, oldflags, -nrpages); 793 + vm_stat_account(mm, vma_flags_to_legacy(old_vma_flags), -nrpages); 794 794 vm_stat_account(mm, newflags, nrpages); 795 795 perf_event_mmap(vma); 796 796 return 0;
+3 -3
mm/mremap.c
··· 1472 1472 1473 1473 /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */ 1474 1474 if (vrm->flags & MREMAP_DONTUNMAP) { 1475 - vm_flags_t vm_flags = vrm->vma->vm_flags; 1475 + vma_flags_t vma_flags = vrm->vma->flags; 1476 1476 unsigned long pages = vrm->old_len >> PAGE_SHIFT; 1477 1477 1478 - if (!may_expand_vm(mm, vm_flags, pages)) 1478 + if (!may_expand_vm(mm, &vma_flags, pages)) 1479 1479 return -ENOMEM; 1480 1480 } 1481 1481 ··· 1813 1813 if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, vrm->delta)) 1814 1814 return -EAGAIN; 1815 1815 1816 - if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT)) 1816 + if (!may_expand_vm(mm, &vma->flags, vrm->delta >> PAGE_SHIFT)) 1817 1817 return -ENOMEM; 1818 1818 1819 1819 return 0;
+19 -15
mm/vma.c
··· 2385 2385 2386 2386 static void update_ksm_flags(struct mmap_state *map) 2387 2387 { 2388 - map->vm_flags = ksm_vma_flags(map->mm, map->file, map->vm_flags); 2388 + map->vma_flags = ksm_vma_flags(map->mm, map->file, map->vma_flags); 2389 2389 } 2390 2390 2391 2391 static void set_desc_from_map(struct vm_area_desc *desc, ··· 2446 2446 } 2447 2447 2448 2448 /* Check against address space limit. */ 2449 - if (!may_expand_vm(map->mm, map->vm_flags, map->pglen - vms->nr_pages)) 2449 + if (!may_expand_vm(map->mm, &map->vma_flags, map->pglen - vms->nr_pages)) 2450 2450 return -ENOMEM; 2451 2451 2452 2452 /* Private writable mapping: check memory availability. */ ··· 2866 2866 return ret; 2867 2867 } 2868 2868 2869 - /* 2869 + /** 2870 2870 * do_brk_flags() - Increase the brk vma if the flags match. 2871 2871 * @vmi: The vma iterator 2872 2872 * @addr: The start address 2873 2873 * @len: The length of the increase 2874 2874 * @vma: The vma, 2875 - * @vm_flags: The VMA Flags 2875 + * @vma_flags: The VMA Flags 2876 2876 * 2877 2877 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags 2878 2878 * do not match then create a new anonymous VMA. Eventually we may be able to 2879 2879 * do some brk-specific accounting here. 2880 + * 2881 + * Returns: %0 on success, or otherwise an error. 2880 2882 */ 2881 2883 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, 2882 - unsigned long addr, unsigned long len, vm_flags_t vm_flags) 2884 + unsigned long addr, unsigned long len, vma_flags_t vma_flags) 2883 2885 { 2884 2886 struct mm_struct *mm = current->mm; 2885 2887 ··· 2889 2887 * Check against address space limits by the changed size 2890 2888 * Note: This happens *after* clearing old mappings in some code paths. 
2891 2889 */ 2892 - vm_flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 2893 - vm_flags = ksm_vma_flags(mm, NULL, vm_flags); 2894 - if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) 2890 + vma_flags_set_mask(&vma_flags, VMA_DATA_DEFAULT_FLAGS); 2891 + vma_flags_set(&vma_flags, VMA_ACCOUNT_BIT); 2892 + vma_flags_set_mask(&vma_flags, mm->def_vma_flags); 2893 + 2894 + vma_flags = ksm_vma_flags(mm, NULL, vma_flags); 2895 + if (!may_expand_vm(mm, &vma_flags, len >> PAGE_SHIFT)) 2895 2896 return -ENOMEM; 2896 2897 2897 2898 if (mm->map_count > get_sysctl_max_map_count()) ··· 2908 2903 * occur after forking, so the expand will only happen on new VMAs. 2909 2904 */ 2910 2905 if (vma && vma->vm_end == addr) { 2911 - VMG_STATE(vmg, mm, vmi, addr, addr + len, vm_flags, PHYS_PFN(addr)); 2906 + VMG_STATE(vmg, mm, vmi, addr, addr + len, vma_flags, PHYS_PFN(addr)); 2912 2907 2913 2908 vmg.prev = vma; 2914 2909 /* vmi is positioned at prev, which this mode expects. */ ··· 2929 2924 2930 2925 vma_set_anonymous(vma); 2931 2926 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); 2932 - vm_flags_init(vma, vm_flags); 2933 - vma->vm_page_prot = vm_get_page_prot(vm_flags); 2927 + vma->flags = vma_flags; 2928 + vma->vm_page_prot = vm_get_page_prot(vma_flags_to_legacy(vma_flags)); 2934 2929 vma_start_write(vma); 2935 2930 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) 2936 2931 goto mas_store_fail; ··· 2941 2936 perf_event_mmap(vma); 2942 2937 mm->total_vm += len >> PAGE_SHIFT; 2943 2938 mm->data_vm += len >> PAGE_SHIFT; 2944 - if (vm_flags & VM_LOCKED) 2939 + if (vma_flags_test(&vma_flags, VMA_LOCKED_BIT)) 2945 2940 mm->locked_vm += (len >> PAGE_SHIFT); 2946 2941 if (pgtable_supports_soft_dirty()) 2947 - vm_flags_set(vma, VM_SOFTDIRTY); 2942 + vma_set_flags(vma, VMA_SOFTDIRTY_BIT); 2948 2943 return 0; 2949 2944 2950 2945 mas_store_fail: ··· 3075 3070 unsigned long new_start; 3076 3071 3077 3072 /* address space limit tests */ 3078 - if (!may_expand_vm(mm, 
vma->vm_flags, grow)) 3073 + if (!may_expand_vm(mm, &vma->flags, grow)) 3079 3074 return -ENOMEM; 3080 3075 3081 3076 /* Stack limit test */ ··· 3293 3288 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) 3294 3289 { 3295 3290 unsigned long charged = vma_pages(vma); 3296 - 3297 3291 3298 3292 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) 3299 3293 return -ENOMEM;
+11 -3
mm/vma.h
··· 237 237 return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start); 238 238 } 239 239 240 - #define VMG_STATE(name, mm_, vmi_, start_, end_, vm_flags_, pgoff_) \ 240 + #define VMG_STATE(name, mm_, vmi_, start_, end_, vma_flags_, pgoff_) \ 241 241 struct vma_merge_struct name = { \ 242 242 .mm = mm_, \ 243 243 .vmi = vmi_, \ 244 244 .start = start_, \ 245 245 .end = end_, \ 246 - .vm_flags = vm_flags_, \ 246 + .vma_flags = vma_flags_, \ 247 247 .pgoff = pgoff_, \ 248 248 .state = VMA_MERGE_START, \ 249 249 } ··· 465 465 struct list_head *uf); 466 466 467 467 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, 468 - unsigned long addr, unsigned long request, unsigned long flags); 468 + unsigned long addr, unsigned long request, 469 + vma_flags_t vma_flags); 469 470 470 471 unsigned long unmapped_area(struct vm_unmapped_area_info *info); 471 472 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); ··· 528 527 return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE; 529 528 } 530 529 530 + static inline bool is_data_mapping_vma_flags(const vma_flags_t *vma_flags) 531 + { 532 + const vma_flags_t mask = vma_flags_and(vma_flags, 533 + VMA_WRITE_BIT, VMA_SHARED_BIT, VMA_STACK_BIT); 534 + 535 + return vma_flags_same(&mask, VMA_WRITE_BIT); 536 + } 531 537 532 538 static inline void vma_iter_config(struct vma_iterator *vmi, 533 539 unsigned long index, unsigned long last)
+3 -2
mm/vma_exec.c
··· 36 36 unsigned long new_start = old_start - shift; 37 37 unsigned long new_end = old_end - shift; 38 38 VMA_ITERATOR(vmi, mm, new_start); 39 - VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff); 39 + VMG_STATE(vmg, mm, &vmi, new_start, old_end, EMPTY_VMA_FLAGS, 40 + vma->vm_pgoff); 40 41 struct vm_area_struct *next; 41 42 struct mmu_gather tlb; 42 43 PAGETABLE_MOVE(pmc, vma, vma, old_start, new_start, length); ··· 136 135 * use STACK_TOP because that can depend on attributes which aren't 137 136 * configured yet. 138 137 */ 139 - BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP); 138 + VM_WARN_ON_ONCE(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP); 140 139 vma->vm_end = STACK_TOP_MAX; 141 140 vma->vm_start = vma->vm_end - PAGE_SIZE; 142 141 if (pgtable_supports_soft_dirty())
+3 -1
security/selinux/hooks.c
··· 7713 7713 7714 7714 static __init int selinux_init(void) 7715 7715 { 7716 + vma_flags_t data_default_flags = VMA_DATA_DEFAULT_FLAGS; 7717 + 7716 7718 pr_info("SELinux: Initializing.\n"); 7717 7719 7718 7720 memset(&selinux_state, 0, sizeof(selinux_state)); ··· 7731 7729 AUDIT_CFG_LSM_SECCTX_SUBJECT | 7732 7730 AUDIT_CFG_LSM_SECCTX_OBJECT); 7733 7731 7734 - default_noexec = !(VM_DATA_DEFAULT_FLAGS & VM_EXEC); 7732 + default_noexec = !vma_flags_test(&data_default_flags, VMA_EXEC_BIT); 7735 7733 if (!default_noexec) 7736 7734 pr_notice("SELinux: virtual memory is executable by default\n"); 7737 7735
-3
tools/testing/vma/include/custom.h
··· 95 95 { 96 96 return PAGE_SIZE; 97 97 } 98 - 99 - #define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \ 100 - VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)
+22 -18
tools/testing/vma/include/dup.h
··· 314 314 /* Bits set in the VMA until the stack is in its final location */ 315 315 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY) 316 316 317 - #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) 317 + #define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? \ 318 + VM_EXEC_BIT : VM_READ_BIT) 318 319 319 320 /* Common data flag combinations */ 320 - #define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \ 321 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 322 - #define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \ 323 - VM_MAYWRITE | VM_MAYEXEC) 324 - #define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \ 325 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 321 + #define VMA_DATA_FLAGS_TSK_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \ 322 + TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \ 323 + VMA_MAYEXEC_BIT) 324 + #define VMA_DATA_FLAGS_NON_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \ 325 + VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT) 326 + #define VMA_DATA_FLAGS_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \ 327 + VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \ 328 + VMA_MAYEXEC_BIT) 326 329 327 - #ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */ 328 - #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC 330 + #ifndef VMA_DATA_DEFAULT_FLAGS /* arch can override this */ 331 + #define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_EXEC 329 332 #endif 330 333 331 - #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ 332 - #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS 334 + #ifndef VMA_STACK_DEFAULT_FLAGS /* arch can override this */ 335 + #define VMA_STACK_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS 333 336 #endif 337 + 338 + #define VMA_STACK_FLAGS append_vma_flags(VMA_STACK_DEFAULT_FLAGS, \ 339 + VMA_STACK_BIT, VMA_ACCOUNT_BIT) 340 + /* Temporary until VMA flags conversion complete. */
341 + #define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS) 334 342 335 343 #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK) 336 344 337 - #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) 338 344 339 345 /* VMA basic access permission flags */ 340 346 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) ··· 350 344 * Special vmas that are non-mergable, non-mlock()able. 351 345 */ 352 346 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) 347 + 348 + #define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \ 349 + VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT) 353 350 354 351 #define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT, \ 355 352 VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT) ··· 365 356 366 357 /* This mask represents all the VMA flag bits used by mlock */ 367 358 #define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT) 368 - 369 - #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) 370 - 371 - #define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \ 372 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 373 - 374 360 #define RLIMIT_STACK 3 /* max stack size */ 375 361 #define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
+5 -4
tools/testing/vma/include/stubs.h
··· 101 101 return false; 102 102 } 103 103 104 - static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm, 105 - const struct file *file, vm_flags_t vm_flags) 104 + static inline vma_flags_t ksm_vma_flags(struct mm_struct *mm, 105 + const struct file *file, vma_flags_t vma_flags) 106 106 { 107 - return vm_flags; 107 + return vma_flags; 108 108 } 109 109 110 110 static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn) ··· 239 239 return 0; 240 240 } 241 241 242 - static inline bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, 242 + static inline bool may_expand_vm(struct mm_struct *mm, 243 + const vma_flags_t *vma_flags, 243 244 unsigned long npages) 244 245 { 245 246 return true;
+1 -2
tools/testing/vma/tests/merge.c
··· 1429 1429 { 1430 1430 vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, 1431 1431 VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT); 1432 - vm_flags_t legacy_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; 1433 1432 struct mm_struct mm = {}; 1434 1433 VMA_ITERATOR(vmi, &mm, 0); 1435 1434 struct vm_area_struct *vma_prev, *vma; 1436 - VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, legacy_flags, 5); 1435 + VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vma_flags, 5); 1437 1436 1438 1437 /* 1439 1438 * Place a VMA prior to the one we're expanding so we assert that we do