Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

tools/testing/vma: separate VMA userland tests into separate files

So far the userland VMA tests have been established as a rough expression
of what's been possible.

Adapt it into a more usable form by separating out tests and shared
helper functions.

Since we test functions that are declared statically in mm/vma.c, we make
use of the trick of #include'ing kernel C files directly.

In order for the tests to continue to function, we must therefore also
#include the test files this way, placing them in the tests/ directory.

We try to keep as much shared logic actually modularised into a separate
compilation unit in shared.c, however the merge_existing() and
attach_vma() helpers rely on statically declared mm/vma.c functions so
these must be declared in main.c.

Link: https://lkml.kernel.org/r/a0455ccfe4fdcd1c962c64f76304f612e5662a4e.1769097829.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Yury Norov <ynorov@nvidia.com>
Cc: Chris Mason <clm@fb.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Lorenzo Stoakes and committed by
Andrew Morton
6aacab30 53f1d936

+406 -335
+2 -2
tools/testing/vma/Makefile
··· 6 6 7 7 include ../shared/shared.mk 8 8 9 - OFILES = $(SHARED_OFILES) vma.o maple-shim.o 9 + OFILES = $(SHARED_OFILES) main.o shared.o maple-shim.o 10 10 TARGETS = vma 11 11 12 - vma.o: vma.c vma_internal.h ../../../mm/vma.c ../../../mm/vma_init.c ../../../mm/vma_exec.c ../../../mm/vma.h 12 + main.o: main.c shared.c shared.h vma_internal.h tests/merge.c tests/mmap.c tests/vma.c ../../../mm/vma.c ../../../mm/vma_init.c ../../../mm/vma_exec.c ../../../mm/vma.h 13 13 14 14 vma: $(OFILES) 15 15 $(CC) $(CFLAGS) -o $@ $(OFILES) $(LDLIBS)
+55
tools/testing/vma/main.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + 3 + #include "shared.h" 4 + /* 5 + * Directly import the VMA implementation here. Our vma_internal.h wrapper 6 + * provides userland-equivalent functionality for everything vma.c uses. 7 + */ 8 + #include "../../../mm/vma_init.c" 9 + #include "../../../mm/vma_exec.c" 10 + #include "../../../mm/vma.c" 11 + 12 + /* Tests are included directly so they can test static functions in mm/vma.c. */ 13 + #include "tests/merge.c" 14 + #include "tests/mmap.c" 15 + #include "tests/vma.c" 16 + 17 + /* Helper functions which utilise static kernel functions. */ 18 + 19 + struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg) 20 + { 21 + struct vm_area_struct *vma; 22 + 23 + vma = vma_merge_existing_range(vmg); 24 + if (vma) 25 + vma_assert_attached(vma); 26 + return vma; 27 + } 28 + 29 + int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma) 30 + { 31 + int res; 32 + 33 + res = vma_link(mm, vma); 34 + if (!res) 35 + vma_assert_attached(vma); 36 + return res; 37 + } 38 + 39 + /* Main test running which invokes tests/ *.c runners. */ 40 + int main(void) 41 + { 42 + int num_tests = 0, num_fail = 0; 43 + 44 + maple_tree_init(); 45 + vma_state_init(); 46 + 47 + run_merge_tests(&num_tests, &num_fail); 48 + run_mmap_tests(&num_tests, &num_fail); 49 + run_vma_tests(&num_tests, &num_fail); 50 + 51 + printf("%d tests run, %d passed, %d failed.\n", 52 + num_tests, num_tests - num_fail, num_fail); 53 + 54 + return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE; 55 + }
+131
tools/testing/vma/shared.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + 3 + #include "shared.h" 4 + 5 + 6 + bool fail_prealloc; 7 + unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; 8 + unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; 9 + unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; 10 + 11 + const struct vm_operations_struct vma_dummy_vm_ops; 12 + struct anon_vma dummy_anon_vma; 13 + struct task_struct __current; 14 + 15 + struct vm_area_struct *alloc_vma(struct mm_struct *mm, 16 + unsigned long start, unsigned long end, 17 + pgoff_t pgoff, vm_flags_t vm_flags) 18 + { 19 + struct vm_area_struct *vma = vm_area_alloc(mm); 20 + 21 + if (vma == NULL) 22 + return NULL; 23 + 24 + vma->vm_start = start; 25 + vma->vm_end = end; 26 + vma->vm_pgoff = pgoff; 27 + vm_flags_reset(vma, vm_flags); 28 + vma_assert_detached(vma); 29 + 30 + return vma; 31 + } 32 + 33 + void detach_free_vma(struct vm_area_struct *vma) 34 + { 35 + vma_mark_detached(vma); 36 + vm_area_free(vma); 37 + } 38 + 39 + struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, 40 + unsigned long start, unsigned long end, 41 + pgoff_t pgoff, vm_flags_t vm_flags) 42 + { 43 + struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags); 44 + 45 + if (vma == NULL) 46 + return NULL; 47 + 48 + if (attach_vma(mm, vma)) { 49 + detach_free_vma(vma); 50 + return NULL; 51 + } 52 + 53 + /* 54 + * Reset this counter which we use to track whether writes have 55 + * begun. Linking to the tree will have caused this to be incremented, 56 + * which means we will get a false positive otherwise. 
57 + */ 58 + vma->vm_lock_seq = UINT_MAX; 59 + 60 + return vma; 61 + } 62 + 63 + void reset_dummy_anon_vma(void) 64 + { 65 + dummy_anon_vma.was_cloned = false; 66 + dummy_anon_vma.was_unlinked = false; 67 + } 68 + 69 + int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi) 70 + { 71 + struct vm_area_struct *vma; 72 + int count = 0; 73 + 74 + fail_prealloc = false; 75 + reset_dummy_anon_vma(); 76 + 77 + vma_iter_set(vmi, 0); 78 + for_each_vma(*vmi, vma) { 79 + detach_free_vma(vma); 80 + count++; 81 + } 82 + 83 + mtree_destroy(&mm->mm_mt); 84 + mm->map_count = 0; 85 + return count; 86 + } 87 + 88 + bool vma_write_started(struct vm_area_struct *vma) 89 + { 90 + int seq = vma->vm_lock_seq; 91 + 92 + /* We reset after each check. */ 93 + vma->vm_lock_seq = UINT_MAX; 94 + 95 + /* The vma_start_write() stub simply increments this value. */ 96 + return seq > -1; 97 + } 98 + 99 + void __vma_set_dummy_anon_vma(struct vm_area_struct *vma, 100 + struct anon_vma_chain *avc, struct anon_vma *anon_vma) 101 + { 102 + vma->anon_vma = anon_vma; 103 + INIT_LIST_HEAD(&vma->anon_vma_chain); 104 + list_add(&avc->same_vma, &vma->anon_vma_chain); 105 + avc->anon_vma = vma->anon_vma; 106 + } 107 + 108 + void vma_set_dummy_anon_vma(struct vm_area_struct *vma, 109 + struct anon_vma_chain *avc) 110 + { 111 + __vma_set_dummy_anon_vma(vma, avc, &dummy_anon_vma); 112 + } 113 + 114 + struct task_struct *get_current(void) 115 + { 116 + return &__current; 117 + } 118 + 119 + unsigned long rlimit(unsigned int limit) 120 + { 121 + return (unsigned long)-1; 122 + } 123 + 124 + void vma_set_range(struct vm_area_struct *vma, 125 + unsigned long start, unsigned long end, 126 + pgoff_t pgoff) 127 + { 128 + vma->vm_start = start; 129 + vma->vm_end = end; 130 + vma->vm_pgoff = pgoff; 131 + }
+114
tools/testing/vma/shared.h
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + 3 + #pragma once 4 + 5 + #include <stdbool.h> 6 + #include <stdio.h> 7 + #include <stdlib.h> 8 + 9 + #include "generated/bit-length.h" 10 + #include "maple-shared.h" 11 + #include "vma_internal.h" 12 + #include "../../../mm/vma.h" 13 + 14 + /* Simple test runner. Assumes local num_[fail, tests] counters. */ 15 + #define TEST(name) \ 16 + do { \ 17 + (*num_tests)++; \ 18 + if (!test_##name()) { \ 19 + (*num_fail)++; \ 20 + fprintf(stderr, "Test " #name " FAILED\n"); \ 21 + } \ 22 + } while (0) 23 + 24 + #define ASSERT_TRUE(_expr) \ 25 + do { \ 26 + if (!(_expr)) { \ 27 + fprintf(stderr, \ 28 + "Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \ 29 + __FILE__, __LINE__, __FUNCTION__, #_expr); \ 30 + return false; \ 31 + } \ 32 + } while (0) 33 + 34 + #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr)) 35 + #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2)) 36 + #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2)) 37 + 38 + #define IS_SET(_val, _flags) ((_val & _flags) == _flags) 39 + 40 + extern bool fail_prealloc; 41 + 42 + /* Override vma_iter_prealloc() so we can choose to fail it. */ 43 + #define vma_iter_prealloc(vmi, vma) \ 44 + (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL)) 45 + 46 + #define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536 47 + 48 + extern unsigned long mmap_min_addr; 49 + extern unsigned long dac_mmap_min_addr; 50 + extern unsigned long stack_guard_gap; 51 + 52 + extern const struct vm_operations_struct vma_dummy_vm_ops; 53 + extern struct anon_vma dummy_anon_vma; 54 + extern struct task_struct __current; 55 + 56 + /* 57 + * Helper function which provides a wrapper around a merge existing VMA 58 + * operation. 59 + * 60 + * Declared in main.c as uses static VMA function. 61 + */ 62 + struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg); 63 + 64 + /* 65 + * Helper function to allocate a VMA and link it to the tree. 
66 + * 67 + * Declared in main.c as uses static VMA function. 68 + */ 69 + int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma); 70 + 71 + /* Helper function providing a dummy vm_ops->close() method.*/ 72 + static inline void dummy_close(struct vm_area_struct *) 73 + { 74 + } 75 + 76 + /* Helper function to simply allocate a VMA. */ 77 + struct vm_area_struct *alloc_vma(struct mm_struct *mm, 78 + unsigned long start, unsigned long end, 79 + pgoff_t pgoff, vm_flags_t vm_flags); 80 + 81 + /* Helper function to detach and free a VMA. */ 82 + void detach_free_vma(struct vm_area_struct *vma); 83 + 84 + /* Helper function to allocate a VMA and link it to the tree. */ 85 + struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, 86 + unsigned long start, unsigned long end, 87 + pgoff_t pgoff, vm_flags_t vm_flags); 88 + 89 + /* 90 + * Helper function to reset the dummy anon_vma to indicate it has not been 91 + * duplicated. 92 + */ 93 + void reset_dummy_anon_vma(void); 94 + 95 + /* 96 + * Helper function to remove all VMAs and destroy the maple tree associated with 97 + * a virtual address space. Returns a count of VMAs in the tree. 98 + */ 99 + int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi); 100 + 101 + /* Helper function to determine if VMA has had vma_start_write() performed. */ 102 + bool vma_write_started(struct vm_area_struct *vma); 103 + 104 + void __vma_set_dummy_anon_vma(struct vm_area_struct *vma, 105 + struct anon_vma_chain *avc, struct anon_vma *anon_vma); 106 + 107 + /* Provide a simple dummy VMA/anon_vma dummy setup for testing. */ 108 + void vma_set_dummy_anon_vma(struct vm_area_struct *vma, 109 + struct anon_vma_chain *avc); 110 + 111 + /* Helper function to specify a VMA's range. */ 112 + void vma_set_range(struct vm_area_struct *vma, 113 + unsigned long start, unsigned long end, 114 + pgoff_t pgoff);
+57
tools/testing/vma/tests/mmap.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + 3 + static bool test_mmap_region_basic(void) 4 + { 5 + struct mm_struct mm = {}; 6 + unsigned long addr; 7 + struct vm_area_struct *vma; 8 + VMA_ITERATOR(vmi, &mm, 0); 9 + 10 + current->mm = &mm; 11 + 12 + /* Map at 0x300000, length 0x3000. */ 13 + addr = __mmap_region(NULL, 0x300000, 0x3000, 14 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, 15 + 0x300, NULL); 16 + ASSERT_EQ(addr, 0x300000); 17 + 18 + /* Map at 0x250000, length 0x3000. */ 19 + addr = __mmap_region(NULL, 0x250000, 0x3000, 20 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, 21 + 0x250, NULL); 22 + ASSERT_EQ(addr, 0x250000); 23 + 24 + /* Map at 0x303000, merging to 0x300000 of length 0x6000. */ 25 + addr = __mmap_region(NULL, 0x303000, 0x3000, 26 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, 27 + 0x303, NULL); 28 + ASSERT_EQ(addr, 0x303000); 29 + 30 + /* Map at 0x24d000, merging to 0x250000 of length 0x6000. */ 31 + addr = __mmap_region(NULL, 0x24d000, 0x3000, 32 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, 33 + 0x24d, NULL); 34 + ASSERT_EQ(addr, 0x24d000); 35 + 36 + ASSERT_EQ(mm.map_count, 2); 37 + 38 + for_each_vma(vmi, vma) { 39 + if (vma->vm_start == 0x300000) { 40 + ASSERT_EQ(vma->vm_end, 0x306000); 41 + ASSERT_EQ(vma->vm_pgoff, 0x300); 42 + } else if (vma->vm_start == 0x24d000) { 43 + ASSERT_EQ(vma->vm_end, 0x253000); 44 + ASSERT_EQ(vma->vm_pgoff, 0x24d); 45 + } else { 46 + ASSERT_FALSE(true); 47 + } 48 + } 49 + 50 + cleanup_mm(&mm, &vmi); 51 + return true; 52 + } 53 + 54 + static void run_mmap_tests(int *num_tests, int *num_fail) 55 + { 56 + TEST(mmap_region_basic); 57 + }
+39
tools/testing/vma/tests/vma.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + 3 + static bool test_copy_vma(void) 4 + { 5 + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; 6 + struct mm_struct mm = {}; 7 + bool need_locks = false; 8 + VMA_ITERATOR(vmi, &mm, 0); 9 + struct vm_area_struct *vma, *vma_new, *vma_next; 10 + 11 + /* Move backwards and do not merge. */ 12 + 13 + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); 14 + vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks); 15 + ASSERT_NE(vma_new, vma); 16 + ASSERT_EQ(vma_new->vm_start, 0); 17 + ASSERT_EQ(vma_new->vm_end, 0x2000); 18 + ASSERT_EQ(vma_new->vm_pgoff, 0); 19 + vma_assert_attached(vma_new); 20 + 21 + cleanup_mm(&mm, &vmi); 22 + 23 + /* Move a VMA into position next to another and merge the two. */ 24 + 25 + vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); 26 + vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, vm_flags); 27 + vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks); 28 + vma_assert_attached(vma_new); 29 + 30 + ASSERT_EQ(vma_new, vma_next); 31 + 32 + cleanup_mm(&mm, &vmi); 33 + return true; 34 + } 35 + 36 + static void run_vma_tests(int *num_tests, int *num_fail) 37 + { 38 + TEST(copy_vma); 39 + }
+8 -324
tools/testing/vma/vma.c tools/testing/vma/tests/merge.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 3 - #include <stdbool.h> 4 - #include <stdio.h> 5 - #include <stdlib.h> 6 - 7 - #include "generated/bit-length.h" 8 - 9 - #include "maple-shared.h" 10 - #include "vma_internal.h" 11 - 12 - /* Include so header guard set. */ 13 - #include "../../../mm/vma.h" 14 - 15 - static bool fail_prealloc; 16 - 17 - /* Then override vma_iter_prealloc() so we can choose to fail it. */ 18 - #define vma_iter_prealloc(vmi, vma) \ 19 - (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL)) 20 - 21 - #define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536 22 - 23 - unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; 24 - unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; 25 - unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; 26 - 27 - /* 28 - * Directly import the VMA implementation here. Our vma_internal.h wrapper 29 - * provides userland-equivalent functionality for everything vma.c uses. 30 - */ 31 - #include "../../../mm/vma_init.c" 32 - #include "../../../mm/vma_exec.c" 33 - #include "../../../mm/vma.c" 34 - 35 - const struct vm_operations_struct vma_dummy_vm_ops; 36 - static struct anon_vma dummy_anon_vma; 37 - 38 - #define ASSERT_TRUE(_expr) \ 39 - do { \ 40 - if (!(_expr)) { \ 41 - fprintf(stderr, \ 42 - "Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \ 43 - __FILE__, __LINE__, __FUNCTION__, #_expr); \ 44 - return false; \ 45 - } \ 46 - } while (0) 47 - #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr)) 48 - #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2)) 49 - #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2)) 50 - 51 - #define IS_SET(_val, _flags) ((_val & _flags) == _flags) 52 - 53 - static struct task_struct __current; 54 - 55 - struct task_struct *get_current(void) 56 - { 57 - return &__current; 58 - } 59 - 60 - unsigned long rlimit(unsigned int limit) 61 - { 62 - return (unsigned long)-1; 63 - } 64 - 65 - /* Helper function to simply allocate a VMA. 
*/ 66 - static struct vm_area_struct *alloc_vma(struct mm_struct *mm, 67 - unsigned long start, 68 - unsigned long end, 69 - pgoff_t pgoff, 70 - vm_flags_t vm_flags) 71 - { 72 - struct vm_area_struct *vma = vm_area_alloc(mm); 73 - 74 - if (vma == NULL) 75 - return NULL; 76 - 77 - vma->vm_start = start; 78 - vma->vm_end = end; 79 - vma->vm_pgoff = pgoff; 80 - vm_flags_reset(vma, vm_flags); 81 - vma_assert_detached(vma); 82 - 83 - return vma; 84 - } 85 - 86 - /* Helper function to allocate a VMA and link it to the tree. */ 87 - static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma) 88 - { 89 - int res; 90 - 91 - res = vma_link(mm, vma); 92 - if (!res) 93 - vma_assert_attached(vma); 94 - return res; 95 - } 96 - 97 - static void detach_free_vma(struct vm_area_struct *vma) 98 - { 99 - vma_mark_detached(vma); 100 - vm_area_free(vma); 101 - } 102 - 103 - /* Helper function to allocate a VMA and link it to the tree. */ 104 - static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, 105 - unsigned long start, 106 - unsigned long end, 107 - pgoff_t pgoff, 108 - vm_flags_t vm_flags) 109 - { 110 - struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags); 111 - 112 - if (vma == NULL) 113 - return NULL; 114 - 115 - if (attach_vma(mm, vma)) { 116 - detach_free_vma(vma); 117 - return NULL; 118 - } 119 - 120 - /* 121 - * Reset this counter which we use to track whether writes have 122 - * begun. Linking to the tree will have caused this to be incremented, 123 - * which means we will get a false positive otherwise. 124 - */ 125 - vma->vm_lock_seq = UINT_MAX; 126 - 127 - return vma; 128 - } 129 - 130 3 /* Helper function which provides a wrapper around a merge new VMA operation. */ 131 4 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg) 132 5 { ··· 20 147 } 21 148 22 149 /* 23 - * Helper function which provides a wrapper around a merge existing VMA 24 - * operation. 
25 - */ 26 - static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg) 27 - { 28 - struct vm_area_struct *vma; 29 - 30 - vma = vma_merge_existing_range(vmg); 31 - if (vma) 32 - vma_assert_attached(vma); 33 - return vma; 34 - } 35 - 36 - /* 37 150 * Helper function which provides a wrapper around the expansion of an existing 38 151 * VMA. 39 152 */ ··· 32 173 * Helper function to reset merge state the associated VMA iterator to a 33 174 * specified new range. 34 175 */ 35 - static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start, 36 - unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags) 176 + void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start, 177 + unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags) 37 178 { 38 179 vma_iter_set(vmg->vmi, start); 39 180 ··· 56 197 57 198 /* Helper function to set both the VMG range and its anon_vma. */ 58 199 static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start, 59 - unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags, 60 - struct anon_vma *anon_vma) 200 + unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags, 201 + struct anon_vma *anon_vma) 61 202 { 62 203 vmg_set_range(vmg, start, end, pgoff, vm_flags); 63 204 vmg->anon_vma = anon_vma; ··· 70 211 * VMA, link it to the maple tree and return it. 
71 212 */ 72 213 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm, 73 - struct vma_merge_struct *vmg, 74 - unsigned long start, unsigned long end, 75 - pgoff_t pgoff, vm_flags_t vm_flags, 76 - bool *was_merged) 214 + struct vma_merge_struct *vmg, unsigned long start, 215 + unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags, 216 + bool *was_merged) 77 217 { 78 218 struct vm_area_struct *merged; 79 219 ··· 90 232 ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE); 91 233 92 234 return alloc_and_link_vma(mm, start, end, pgoff, vm_flags); 93 - } 94 - 95 - /* 96 - * Helper function to reset the dummy anon_vma to indicate it has not been 97 - * duplicated. 98 - */ 99 - static void reset_dummy_anon_vma(void) 100 - { 101 - dummy_anon_vma.was_cloned = false; 102 - dummy_anon_vma.was_unlinked = false; 103 - } 104 - 105 - /* 106 - * Helper function to remove all VMAs and destroy the maple tree associated with 107 - * a virtual address space. Returns a count of VMAs in the tree. 108 - */ 109 - static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi) 110 - { 111 - struct vm_area_struct *vma; 112 - int count = 0; 113 - 114 - fail_prealloc = false; 115 - reset_dummy_anon_vma(); 116 - 117 - vma_iter_set(vmi, 0); 118 - for_each_vma(*vmi, vma) { 119 - detach_free_vma(vma); 120 - count++; 121 - } 122 - 123 - mtree_destroy(&mm->mm_mt); 124 - mm->map_count = 0; 125 - return count; 126 - } 127 - 128 - /* Helper function to determine if VMA has had vma_start_write() performed. */ 129 - static bool vma_write_started(struct vm_area_struct *vma) 130 - { 131 - int seq = vma->vm_lock_seq; 132 - 133 - /* We reset after each check. */ 134 - vma->vm_lock_seq = UINT_MAX; 135 - 136 - /* The vma_start_write() stub simply increments this value. 
*/ 137 - return seq > -1; 138 - } 139 - 140 - /* Helper function providing a dummy vm_ops->close() method.*/ 141 - static void dummy_close(struct vm_area_struct *) 142 - { 143 - } 144 - 145 - static void __vma_set_dummy_anon_vma(struct vm_area_struct *vma, 146 - struct anon_vma_chain *avc, 147 - struct anon_vma *anon_vma) 148 - { 149 - vma->anon_vma = anon_vma; 150 - INIT_LIST_HEAD(&vma->anon_vma_chain); 151 - list_add(&avc->same_vma, &vma->anon_vma_chain); 152 - avc->anon_vma = vma->anon_vma; 153 - } 154 - 155 - static void vma_set_dummy_anon_vma(struct vm_area_struct *vma, 156 - struct anon_vma_chain *avc) 157 - { 158 - __vma_set_dummy_anon_vma(vma, avc, &dummy_anon_vma); 159 235 } 160 236 161 237 static bool test_simple_merge(void) ··· 1408 1616 return true; 1409 1617 } 1410 1618 1411 - static bool test_copy_vma(void) 1412 - { 1413 - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; 1414 - struct mm_struct mm = {}; 1415 - bool need_locks = false; 1416 - VMA_ITERATOR(vmi, &mm, 0); 1417 - struct vm_area_struct *vma, *vma_new, *vma_next; 1418 - 1419 - /* Move backwards and do not merge. */ 1420 - 1421 - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); 1422 - vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks); 1423 - ASSERT_NE(vma_new, vma); 1424 - ASSERT_EQ(vma_new->vm_start, 0); 1425 - ASSERT_EQ(vma_new->vm_end, 0x2000); 1426 - ASSERT_EQ(vma_new->vm_pgoff, 0); 1427 - vma_assert_attached(vma_new); 1428 - 1429 - cleanup_mm(&mm, &vmi); 1430 - 1431 - /* Move a VMA into position next to another and merge the two. 
*/ 1432 - 1433 - vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); 1434 - vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, vm_flags); 1435 - vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks); 1436 - vma_assert_attached(vma_new); 1437 - 1438 - ASSERT_EQ(vma_new, vma_next); 1439 - 1440 - cleanup_mm(&mm, &vmi); 1441 - return true; 1442 - } 1443 - 1444 1619 static bool test_expand_only_mode(void) 1445 1620 { 1446 1621 vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; ··· 1448 1689 return true; 1449 1690 } 1450 1691 1451 - static bool test_mmap_region_basic(void) 1692 + static void run_merge_tests(int *num_tests, int *num_fail) 1452 1693 { 1453 - struct mm_struct mm = {}; 1454 - unsigned long addr; 1455 - struct vm_area_struct *vma; 1456 - VMA_ITERATOR(vmi, &mm, 0); 1457 - 1458 - current->mm = &mm; 1459 - 1460 - /* Map at 0x300000, length 0x3000. */ 1461 - addr = __mmap_region(NULL, 0x300000, 0x3000, 1462 - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, 1463 - 0x300, NULL); 1464 - ASSERT_EQ(addr, 0x300000); 1465 - 1466 - /* Map at 0x250000, length 0x3000. */ 1467 - addr = __mmap_region(NULL, 0x250000, 0x3000, 1468 - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, 1469 - 0x250, NULL); 1470 - ASSERT_EQ(addr, 0x250000); 1471 - 1472 - /* Map at 0x303000, merging to 0x300000 of length 0x6000. */ 1473 - addr = __mmap_region(NULL, 0x303000, 0x3000, 1474 - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, 1475 - 0x303, NULL); 1476 - ASSERT_EQ(addr, 0x303000); 1477 - 1478 - /* Map at 0x24d000, merging to 0x250000 of length 0x6000. 
*/ 1479 - addr = __mmap_region(NULL, 0x24d000, 0x3000, 1480 - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, 1481 - 0x24d, NULL); 1482 - ASSERT_EQ(addr, 0x24d000); 1483 - 1484 - ASSERT_EQ(mm.map_count, 2); 1485 - 1486 - for_each_vma(vmi, vma) { 1487 - if (vma->vm_start == 0x300000) { 1488 - ASSERT_EQ(vma->vm_end, 0x306000); 1489 - ASSERT_EQ(vma->vm_pgoff, 0x300); 1490 - } else if (vma->vm_start == 0x24d000) { 1491 - ASSERT_EQ(vma->vm_end, 0x253000); 1492 - ASSERT_EQ(vma->vm_pgoff, 0x24d); 1493 - } else { 1494 - ASSERT_FALSE(true); 1495 - } 1496 - } 1497 - 1498 - cleanup_mm(&mm, &vmi); 1499 - return true; 1500 - } 1501 - 1502 - int main(void) 1503 - { 1504 - int num_tests = 0, num_fail = 0; 1505 - 1506 - maple_tree_init(); 1507 - vma_state_init(); 1508 - 1509 - #define TEST(name) \ 1510 - do { \ 1511 - num_tests++; \ 1512 - if (!test_##name()) { \ 1513 - num_fail++; \ 1514 - fprintf(stderr, "Test " #name " FAILED\n"); \ 1515 - } \ 1516 - } while (0) 1517 - 1518 1694 /* Very simple tests to kick the tyres. */ 1519 1695 TEST(simple_merge); 1520 1696 TEST(simple_modify); ··· 1465 1771 TEST(dup_anon_vma); 1466 1772 TEST(vmi_prealloc_fail); 1467 1773 TEST(merge_extend); 1468 - TEST(copy_vma); 1469 1774 TEST(expand_only_mode); 1470 - 1471 - TEST(mmap_region_basic); 1472 - 1473 - #undef TEST 1474 - 1475 - printf("%d tests run, %d passed, %d failed.\n", 1476 - num_tests, num_tests - num_fail, num_fail); 1477 - 1478 - return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE; 1479 1775 }
-9
tools/testing/vma/vma_internal.h
··· 1127 1127 atomic_inc(&mapping->i_mmap_writable); 1128 1128 } 1129 1129 1130 - static inline void vma_set_range(struct vm_area_struct *vma, 1131 - unsigned long start, unsigned long end, 1132 - pgoff_t pgoff) 1133 - { 1134 - vma->vm_start = start; 1135 - vma->vm_end = end; 1136 - vma->vm_pgoff = pgoff; 1137 - } 1138 - 1139 1130 static inline 1140 1131 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max) 1141 1132 {