/* SPDX-License-Identifier: GPL-2.0+ */

#pragma once

/* Forward declarations to avoid header cycle. */
struct vm_area_struct;
static inline void vma_start_write(struct vm_area_struct *vma);

extern const struct vm_operations_struct vma_dummy_vm_ops;
extern unsigned long stack_guard_gap;
extern unsigned long rlimit(unsigned int limit);
struct task_struct *get_current(void);

#define MMF_HAS_MDWE 28
#define current get_current()

/*
 * Define the task command name length as enum, then it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

/* PARTIALLY implemented types. */
struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */

	union {
		vm_flags_t def_flags;
		vma_flags_t def_vma_flags;
	};

	mm_flags_t flags; /* Must use mm_flags_* helpers to access */
};

struct address_space {
	struct rb_root_cached i_mmap;
	unsigned long flags;
	atomic_t i_mmap_writable;
};

struct file_operations {
	int (*mmap)(struct file *, struct vm_area_struct *);
	int (*mmap_prepare)(struct vm_area_desc *);
};

struct file {
	struct address_space *f_mapping;
	const struct file_operations *f_op;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int personality;
};

struct kref {
	refcount_t refcount;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

/*
 * Contains declarations that are DUPLICATED from kernel source in order to
 * facilitate userland VMA testing.
 *
 * These must be kept in sync with kernel source.
 */

#define VMA_LOCK_OFFSET 0x40000000

typedef struct { unsigned long v; } freeptr_t;

#define VM_NONE 0x00000000

typedef int __bitwise vma_flag_t;

#define ACCESS_PRIVATE(p, member) ((p)->member)

#define DECLARE_VMA_BIT(name, bitnum) \
	VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum)
#define DECLARE_VMA_BIT_ALIAS(name, aliased) \
	VMA_ ## name ## _BIT = VMA_ ## aliased ## _BIT
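
/*
 * For illustration (pure macro expansion): DECLARE_VMA_BIT(READ, 0) expands
 * to
 *
 *	VMA_READ_BIT = ((__force vma_flag_t)0)
 *
 * and DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN) to
 *
 *	VMA_STACK_BIT = VMA_GROWSDOWN_BIT
 */
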
enum {
	DECLARE_VMA_BIT(READ, 0),
	DECLARE_VMA_BIT(WRITE, 1),
	DECLARE_VMA_BIT(EXEC, 2),
	DECLARE_VMA_BIT(SHARED, 3),
	/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
	DECLARE_VMA_BIT(MAYREAD, 4),	/* limits for mprotect() etc. */
	DECLARE_VMA_BIT(MAYWRITE, 5),
	DECLARE_VMA_BIT(MAYEXEC, 6),
	DECLARE_VMA_BIT(MAYSHARE, 7),
	DECLARE_VMA_BIT(GROWSDOWN, 8),	/* general info on the segment */
#ifdef CONFIG_MMU
	DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */
#else
	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
	DECLARE_VMA_BIT(MAYOVERLAY, 9),
#endif /* CONFIG_MMU */
	/* Page-ranges managed without "struct page", just pure PFN */
	DECLARE_VMA_BIT(PFNMAP, 10),
	DECLARE_VMA_BIT(MAYBE_GUARD, 11),
	DECLARE_VMA_BIT(UFFD_WP, 12),	/* wrprotect pages tracking */
	DECLARE_VMA_BIT(LOCKED, 13),
	DECLARE_VMA_BIT(IO, 14),	/* Memory mapped I/O or similar */
	DECLARE_VMA_BIT(SEQ_READ, 15),	/* App will access data sequentially */
	DECLARE_VMA_BIT(RAND_READ, 16),	/* App will not benefit from clustered reads */
	DECLARE_VMA_BIT(DONTCOPY, 17),	/* Do not copy this vma on fork */
	DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */
	DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */
	DECLARE_VMA_BIT(ACCOUNT, 20),	/* Is a VM accounted object */
	DECLARE_VMA_BIT(NORESERVE, 21),	/* should the VM suppress accounting */
	DECLARE_VMA_BIT(HUGETLB, 22),	/* Huge TLB Page VM */
	DECLARE_VMA_BIT(SYNC, 23),	/* Synchronous page faults */
	DECLARE_VMA_BIT(ARCH_1, 24),	/* Architecture-specific flag */
	DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */
	DECLARE_VMA_BIT(DONTDUMP, 26),	/* Do not include in the core dump */
	DECLARE_VMA_BIT(SOFTDIRTY, 27),	/* NOT soft dirty clean area */
	DECLARE_VMA_BIT(MIXEDMAP, 28),	/* Can contain struct page and pure PFN pages */
	DECLARE_VMA_BIT(HUGEPAGE, 29),	/* MADV_HUGEPAGE marked this vma */
	DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */
	DECLARE_VMA_BIT(MERGEABLE, 31),	/* KSM may merge identical pages */
	/* These bits are reused, we define specific uses below. */
	DECLARE_VMA_BIT(HIGH_ARCH_0, 32),
	DECLARE_VMA_BIT(HIGH_ARCH_1, 33),
	DECLARE_VMA_BIT(HIGH_ARCH_2, 34),
	DECLARE_VMA_BIT(HIGH_ARCH_3, 35),
	DECLARE_VMA_BIT(HIGH_ARCH_4, 36),
	DECLARE_VMA_BIT(HIGH_ARCH_5, 37),
	DECLARE_VMA_BIT(HIGH_ARCH_6, 38),
	/*
	 * This flag is used to connect VFIO to arch specific KVM code. It
	 * indicates that the memory under this VMA is safe for use with any
	 * non-cacheable memory type inside KVM. Some VFIO devices, on some
	 * platforms, are thought to be unsafe and can cause machine crashes
	 * if KVM does not lock down the memory type.
	 */
	DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
#ifdef CONFIG_PPC32
	DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
#else
	DECLARE_VMA_BIT(DROPPABLE, 40),
#endif
	DECLARE_VMA_BIT(UFFD_MINOR, 41),
	DECLARE_VMA_BIT(SEALED, 42),
	/* Flags that reuse flags above. */
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
	DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
#if defined(CONFIG_X86_USER_SHADOW_STACK)
	/*
	 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
	 * support in core mm.
	 *
	 * These VMAs will get a single end guard page. This helps userspace
	 * protect itself from attacks. A single page is enough for current
	 * shadow stack archs (x86). See the comments near alloc_shstk() in
	 * arch/x86/kernel/shstk.c for more details on the guard size.
	 */
	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5),
#elif defined(CONFIG_ARM64_GCS)
	/*
	 * arm64's Guarded Control Stack implements similar functionality and
	 * has similar constraints to shadow stacks.
	 */
	DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6),
#endif
	DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1),	/* Strong Access Ordering (powerpc) */
	DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1),	/* parisc */
	DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1),	/* sparc64 */
	DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1),	/* arm64 */
	DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1),	/* sparc64, arm64 */
	DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1),	/* !CONFIG_MMU */
	DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4),	/* arm64 */
	DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */
#ifdef CONFIG_STACK_GROWSUP
	DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP),
	DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN),
#else
	DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN),
#endif
};

#define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
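/*
 * For illustration: VM_READ below expands to BIT((__force int)VMA_READ_BIT),
 * i.e. 1UL << 0, so the VM_* values keep the legacy vm_flags_t bit layout.
 */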
#define VM_READ		INIT_VM_FLAG(READ)
#define VM_WRITE	INIT_VM_FLAG(WRITE)
#define VM_EXEC		INIT_VM_FLAG(EXEC)
#define VM_SHARED	INIT_VM_FLAG(SHARED)
#define VM_MAYREAD	INIT_VM_FLAG(MAYREAD)
#define VM_MAYWRITE	INIT_VM_FLAG(MAYWRITE)
#define VM_MAYEXEC	INIT_VM_FLAG(MAYEXEC)
#define VM_MAYSHARE	INIT_VM_FLAG(MAYSHARE)
#define VM_GROWSDOWN	INIT_VM_FLAG(GROWSDOWN)
#ifdef CONFIG_MMU
#define VM_UFFD_MISSING	INIT_VM_FLAG(UFFD_MISSING)
#else
#define VM_UFFD_MISSING	VM_NONE
#define VM_MAYOVERLAY	INIT_VM_FLAG(MAYOVERLAY)
#endif
#define VM_PFNMAP	INIT_VM_FLAG(PFNMAP)
#define VM_MAYBE_GUARD	INIT_VM_FLAG(MAYBE_GUARD)
#define VM_UFFD_WP	INIT_VM_FLAG(UFFD_WP)
#define VM_LOCKED	INIT_VM_FLAG(LOCKED)
#define VM_IO		INIT_VM_FLAG(IO)
#define VM_SEQ_READ	INIT_VM_FLAG(SEQ_READ)
#define VM_RAND_READ	INIT_VM_FLAG(RAND_READ)
#define VM_DONTCOPY	INIT_VM_FLAG(DONTCOPY)
#define VM_DONTEXPAND	INIT_VM_FLAG(DONTEXPAND)
#define VM_LOCKONFAULT	INIT_VM_FLAG(LOCKONFAULT)
#define VM_ACCOUNT	INIT_VM_FLAG(ACCOUNT)
#define VM_NORESERVE	INIT_VM_FLAG(NORESERVE)
#define VM_HUGETLB	INIT_VM_FLAG(HUGETLB)
#define VM_SYNC		INIT_VM_FLAG(SYNC)
#define VM_ARCH_1	INIT_VM_FLAG(ARCH_1)
#define VM_WIPEONFORK	INIT_VM_FLAG(WIPEONFORK)
#define VM_DONTDUMP	INIT_VM_FLAG(DONTDUMP)
#ifdef CONFIG_MEM_SOFT_DIRTY
#define VM_SOFTDIRTY	INIT_VM_FLAG(SOFTDIRTY)
#else
#define VM_SOFTDIRTY	VM_NONE
#endif
#define VM_MIXEDMAP	INIT_VM_FLAG(MIXEDMAP)
#define VM_HUGEPAGE	INIT_VM_FLAG(HUGEPAGE)
#define VM_NOHUGEPAGE	INIT_VM_FLAG(NOHUGEPAGE)
#define VM_MERGEABLE	INIT_VM_FLAG(MERGEABLE)
#define VM_STACK	INIT_VM_FLAG(STACK)
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_EARLY	INIT_VM_FLAG(STACK_EARLY)
#else
#define VM_STACK_EARLY	VM_NONE
#endif
#ifdef CONFIG_ARCH_HAS_PKEYS
#define VM_PKEY_SHIFT ((__force int)VMA_HIGH_ARCH_0_BIT)
/* Despite the naming, these are FLAGS not bits. */
#define VM_PKEY_BIT0	INIT_VM_FLAG(PKEY_BIT0)
#define VM_PKEY_BIT1	INIT_VM_FLAG(PKEY_BIT1)
#define VM_PKEY_BIT2	INIT_VM_FLAG(PKEY_BIT2)
#if CONFIG_ARCH_PKEY_BITS > 3
#define VM_PKEY_BIT3	INIT_VM_FLAG(PKEY_BIT3)
#else
#define VM_PKEY_BIT3	VM_NONE
#endif /* CONFIG_ARCH_PKEY_BITS > 3 */
#if CONFIG_ARCH_PKEY_BITS > 4
#define VM_PKEY_BIT4	INIT_VM_FLAG(PKEY_BIT4)
#else
#define VM_PKEY_BIT4	VM_NONE
#endif /* CONFIG_ARCH_PKEY_BITS > 4 */
#endif /* CONFIG_ARCH_HAS_PKEYS */
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
#define VM_SHADOW_STACK	INIT_VM_FLAG(SHADOW_STACK)
#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT)
#else
#define VM_SHADOW_STACK	VM_NONE
#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT)
#endif
#if defined(CONFIG_PPC64)
#define VM_SAO		INIT_VM_FLAG(SAO)
#elif defined(CONFIG_PARISC)
#define VM_GROWSUP	INIT_VM_FLAG(GROWSUP)
#elif defined(CONFIG_SPARC64)
#define VM_SPARC_ADI	INIT_VM_FLAG(SPARC_ADI)
#define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
#elif defined(CONFIG_ARM64)
#define VM_ARM64_BTI	INIT_VM_FLAG(ARM64_BTI)
#define VM_ARCH_CLEAR	INIT_VM_FLAG(ARCH_CLEAR)
#elif !defined(CONFIG_MMU)
#define VM_MAPPED_COPY	INIT_VM_FLAG(MAPPED_COPY)
#endif
#ifndef VM_GROWSUP
#define VM_GROWSUP	VM_NONE
#endif
#ifdef CONFIG_ARM64_MTE
#define VM_MTE		INIT_VM_FLAG(MTE)
#define VM_MTE_ALLOWED	INIT_VM_FLAG(MTE_ALLOWED)
#else
#define VM_MTE		VM_NONE
#define VM_MTE_ALLOWED	VM_NONE
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
#define VM_UFFD_MINOR	INIT_VM_FLAG(UFFD_MINOR)
#else
#define VM_UFFD_MINOR	VM_NONE
#endif
#ifdef CONFIG_64BIT
#define VM_ALLOW_ANY_UNCACHED	INIT_VM_FLAG(ALLOW_ANY_UNCACHED)
#define VM_SEALED	INIT_VM_FLAG(SEALED)
#else
#define VM_ALLOW_ANY_UNCACHED	VM_NONE
#define VM_SEALED	VM_NONE
#endif
#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
#define VM_DROPPABLE	INIT_VM_FLAG(DROPPABLE)
#else
#define VM_DROPPABLE	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? \
		       VMA_EXEC_BIT : VMA_READ_BIT)

/* Common data flag combinations */
#define VMA_DATA_FLAGS_TSK_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
		TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
		VMA_MAYEXEC_BIT)
#define VMA_DATA_FLAGS_NON_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
		VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT)
#define VMA_DATA_FLAGS_EXEC	mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
		VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
		VMA_MAYEXEC_BIT)

#ifndef VMA_DATA_DEFAULT_FLAGS		/* arch can override this */
#define VMA_DATA_DEFAULT_FLAGS	VMA_DATA_FLAGS_EXEC
#endif

#ifndef VMA_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VMA_STACK_DEFAULT_FLAGS	VMA_DATA_DEFAULT_FLAGS
#endif

#define VMA_STACK_FLAGS	append_vma_flags(VMA_STACK_DEFAULT_FLAGS, \
		VMA_STACK_BIT, VMA_ACCOUNT_BIT)
/* Temporary until VMA flags conversion complete. */
#define VM_STACK_FLAGS	vma_flags_to_legacy(VMA_STACK_FLAGS)

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)

/*
 * Special vmas that are non-mergeable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
		VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)

#define VMA_REMAP_FLAGS	mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT, \
		VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW		DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX		DEFAULT_MAP_WINDOW
#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

#define VMA_LOCKED_MASK	mk_vma_flags(VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT)

#define RLIMIT_STACK	3	/* max stack size */
#define RLIMIT_MEMLOCK	8	/* max locked-in-memory address space */

#define CAP_IPC_LOCK 14

#ifdef CONFIG_MEM_SOFT_DIRTY
#define VMA_STICKY_FLAGS mk_vma_flags(VMA_SOFTDIRTY_BIT, VMA_MAYBE_GUARD_BIT)
#else
#define VMA_STICKY_FLAGS mk_vma_flags(VMA_MAYBE_GUARD_BIT)
#endif

#define VMA_IGNORE_MERGE_FLAGS VMA_STICKY_FLAGS

#define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define AS_MM_ALL_LOCKS 2

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
	READ_IMPLIES_EXEC = 0x0400000,
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = __addr,				\
			.node = NULL,					\
			.status = ma_start,				\
		},							\
	}

#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

#define DECLARE_BITMAP(name, bits) \
	unsigned long name[BITS_TO_LONGS(bits)]

#define EMPTY_VMA_FLAGS ((vma_flags_t){ })

#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

static __always_inline bool vma_flags_empty(const vma_flags_t *flags)
{
	const unsigned long *bitmap = flags->__vma_flags;

	return bitmap_empty(bitmap, NUM_VMA_FLAG_BITS);
}

/* What action should be taken after an .mmap_prepare call is complete? */
enum mmap_action_type {
	MMAP_NOTHING,		/* Mapping is complete, no further action. */
	MMAP_REMAP_PFN,		/* Remap PFN range. */
	MMAP_IO_REMAP_PFN,	/* I/O remap PFN range. */
	MMAP_SIMPLE_IO_REMAP,	/* I/O remap with guardrails. */
	MMAP_MAP_KERNEL_PAGES,	/* Map kernel page range from an array. */
};

/*
 * Describes an action an mmap_prepare hook can instruct to be taken to complete
 * the mapping of a VMA. Specified in vm_area_desc.
 */
struct mmap_action {
	union {
		struct {
			unsigned long start;
			unsigned long start_pfn;
			unsigned long size;
			pgprot_t pgprot;
		} remap;
		struct {
			phys_addr_t start_phys_addr;
			unsigned long size;
		} simple_ioremap;
		struct {
			unsigned long start;
			struct page **pages;
			unsigned long nr_pages;
			pgoff_t pgoff;
		} map_kernel;
	};
	enum mmap_action_type type;

	/*
	 * If specified, this hook is invoked after the selected action has been
	 * successfully completed. Note that the VMA write lock is still held.
	 *
	 * The absolute minimum ought to be done here.
	 *
	 * Returns 0 on success, or an error code.
	 */
	int (*success_hook)(const struct vm_area_struct *vma);

	/*
	 * If specified, this hook is invoked when an error occurs while
	 * attempting the selected action.
	 *
	 * The hook can return an error code in order to filter the error, but
	 * it is not valid to clear the error here.
	 */
	int (*error_hook)(int err);

	/*
	 * This should be set in the rare instances where the operation
	 * requires that rmap not be able to access the VMA until it is
	 * completely set up.
	 */
	bool hide_from_rmap_until_complete :1;
};

/* Operations which modify VMAs. */
enum vma_operation {
	VMA_OP_SPLIT,
	VMA_OP_MERGE_UNFAULTED,
	VMA_OP_REMAP,
	VMA_OP_FORK,
};

/*
 * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
 * manipulate mutable fields which will cause those fields to be updated in the
 * resultant VMA.
 *
 * Helper functions are not required for manipulating any field.
 */
struct vm_area_desc {
	/* Immutable state. */
	struct mm_struct *mm;
	struct file *file;	/* May vary from vm_file in stacked callers. */
	unsigned long start;
	unsigned long end;

	/* Mutable fields. Populated with initial state. */
	pgoff_t pgoff;
	struct file *vm_file;
	vma_flags_t vma_flags;
	pgprot_t page_prot;

	/* Write-only fields. */
	const struct vm_operations_struct *vm_ops;
	void *private_data;

	/* Take further action? */
	struct mmap_action action;
};
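
/*
 * Illustrative sketch only (not part of the duplicated kernel declarations):
 * a minimal .mmap_prepare hook that fills in desc->action to request a PFN
 * remap over the whole mapping. The PFN of zero is a placeholder; a real
 * driver would supply the PFN of its backing memory.
 */
static inline int example_mmap_prepare(struct vm_area_desc *desc)
{
	desc->action.type = MMAP_REMAP_PFN;
	desc->action.remap.start = desc->start;
	desc->action.remap.start_pfn = 0;	/* hypothetical PFN */
	desc->action.remap.size = desc->end - desc->start;
	desc->action.remap.pgprot = desc->page_prot;

	return 0;
}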

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
		freeptr_t vm_freeptr;	/* Pointer used by SLAB_TYPESAFE_BY_RCU */
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vma_flags_t flags;
	};

#ifdef CONFIG_PER_VMA_LOCK
	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	unsigned int vm_lock_seq;
#endif

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
#ifdef CONFIG_PER_VMA_LOCK
	/* Unstable RCU readers are allowed to read this. */
	refcount_t vm_refcnt;
#endif
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_operations_struct {
	/**
	 * @open: Called when a VMA is remapped, split or forked. Not called
	 * upon first mapping a VMA.
	 * Context: User context. May sleep. Caller holds mmap_lock.
	 */
	void (*open)(struct vm_area_struct *vma);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context. May sleep. Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct *vma);
	/**
	 * @mapped: Called when the VMA is first mapped in the MM. Not called if
	 * the new VMA is merged with an adjacent VMA.
	 *
	 * The @vm_private_data field is an output field allowing the user to
	 * modify vma->vm_private_data as necessary.
	 *
	 * ONLY valid if set from f_op->mmap_prepare. Will result in an error if
	 * set from f_op->mmap.
	 *
	 * Returns %0 on success, or an error otherwise. On error, the VMA will
	 * be unmapped.
	 *
	 * Context: User context. May sleep. Caller holds mmap_lock.
	 */
	int (*mapped)(unsigned long start, unsigned long end, pgoff_t pgoff,
		      const struct file *file, void **vm_private_data);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *vma, unsigned long addr);
	int (*mremap)(struct vm_area_struct *vma);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised. The VMA must not
	 * be modified. Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct *vma);

	/*
	 * Notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS.
	 */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* Same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP. */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/*
	 * Called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/*
	 * Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name. Returning non-NULL will also cause this
	 * vma to be dumped unconditionally.
	 */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return. Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
#ifdef CONFIG_FIND_NORMAL_PAGE
	/*
	 * Called by vm_normal_page() for special PTEs in @vma at @addr. This
	 * allows for returning a "normal" page from vm_normal_page() even
	 * though the PTE indicates that the "struct page" either does not exist
	 * or should not be touched: "special".
	 *
	 * Do not add new users: this really only works when a "normal" page
	 * was mapped, but then the PTE got changed to something weird (+
	 * marked special) that would not make pte_pfn() identify the originally
	 * inserted page.
	 */
	struct page *(*find_normal_page)(struct vm_area_struct *vma,
					 unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
};

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long start_gap;
};

struct pagetable_move_control {
	struct vm_area_struct *old;	/* Source VMA. */
	struct vm_area_struct *new;	/* Destination VMA. */
	unsigned long old_addr;	/* Address from which the move begins. */
	unsigned long old_end;	/* Exclusive address at which old range ends. */
	unsigned long new_addr;	/* Address to move page tables to. */
	unsigned long len_in;	/* Bytes to remap specified by user. */

	bool need_rmap_locks;	/* Do rmap locks need to be taken? */
	bool for_stack;		/* Is this an early temp stack being moved? */
};

#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
	struct pagetable_move_control name = {				\
		.old = old_,						\
		.new = new_,						\
		.old_addr = old_addr_,					\
		.old_end = (old_addr_) + (len_),			\
		.new_addr = new_addr_,					\
		.len_in = len_,						\
	}
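
/*
 * Illustrative sketch: declaring a move description for an 8-page remap
 * between two already-constructed VMAs. The addresses come from the VMAs
 * themselves; only the length is a made-up example value.
 */
static inline void example_declare_move(struct vm_area_struct *old_vma,
		struct vm_area_struct *new_vma)
{
	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_vma->vm_start,
			new_vma->vm_start, 8 * PAGE_SIZE);

	/* pmc.old_end is computed as old_addr + len by the initialiser. */
	(void)pmc;
}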

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}

static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
{
	return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}

/*
 * Copy value to the first system word of VMA flags, non-atomically.
 *
 * IMPORTANT: This does not overwrite bytes past the first system word. The
 * caller must account for this.
 */
static __always_inline void vma_flags_overwrite_word(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	bitmap[0] = value;
}

/*
 * Copy value to the first system word of VMA flags ONCE, non-atomically.
 *
 * IMPORTANT: This does not overwrite bytes past the first system word. The
 * caller must account for this.
 */
static __always_inline void vma_flags_overwrite_word_once(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	WRITE_ONCE(*bitmap, value);
}

/* Update the first system word of VMA flags setting bits, non-atomically. */
static __always_inline void vma_flags_set_word(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	*bitmap |= value;
}

/* Update the first system word of VMA flags clearing bits, non-atomically. */
static __always_inline void vma_flags_clear_word(vma_flags_t *flags,
		unsigned long value)
{
	unsigned long *bitmap = flags->__vma_flags;

	*bitmap &= ~value;
}

static __always_inline void vma_flags_clear_all(vma_flags_t *flags)
{
	bitmap_zero(ACCESS_PRIVATE(flags, __vma_flags), NUM_VMA_FLAG_BITS);
}

/*
 * Helper function which converts a vma_flags_t value to a legacy vm_flags_t
 * value. This is only valid if the input flags value can be expressed in a
 * system word.
 *
 * Will be removed once the conversion to VMA flags is complete.
 */
static __always_inline vm_flags_t vma_flags_to_legacy(vma_flags_t flags)
{
	return (vm_flags_t)flags.__vma_flags[0];
}

/*
 * Helper function which converts a legacy vm_flags_t value to a vma_flags_t
 * value.
 *
 * Will be removed once the conversion to VMA flags is complete.
 */
static __always_inline vma_flags_t legacy_to_vma_flags(vm_flags_t flags)
{
	vma_flags_t ret = EMPTY_VMA_FLAGS;

	vma_flags_overwrite_word(&ret, flags);
	return ret;
}
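
/*
 * Illustrative sketch: the two conversion helpers above round-trip any flag
 * value that fits in a single system word.
 */
static inline bool example_flags_round_trip(vm_flags_t flags)
{
	const vma_flags_t tmp = legacy_to_vma_flags(flags);

	return vma_flags_to_legacy(tmp) == flags;
}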

static __always_inline void vma_flags_set_flag(vma_flags_t *flags,
		vma_flag_t bit)
{
	unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);

	__set_bit((__force int)bit, bitmap);
}

/* Use when VMA is not part of the VMA tree and needs no locking */
static inline void vm_flags_init(struct vm_area_struct *vma,
		vm_flags_t flags)
{
	vma_flags_clear_all(&vma->flags);
	vma_flags_overwrite_word(&vma->flags, flags);
}

/*
 * Use when VMA is part of the VMA tree and modifications need coordination.
 * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
 * it should be locked explicitly beforehand.
 */
static inline void vm_flags_reset(struct vm_area_struct *vma,
		vm_flags_t flags)
{
	vma_assert_write_locked(vma);
	vm_flags_init(vma, flags);
}

static inline void vma_flags_reset_once(struct vm_area_struct *vma,
		vma_flags_t *flags)
{
	const unsigned long word = flags->__vma_flags[0];

	/* It is assumed only the first system word must be written once. */
	vma_flags_overwrite_word_once(&vma->flags, word);
	/* The remainder can be copied normally. */
	if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) {
		unsigned long *dst = &vma->flags.__vma_flags[1];
		const unsigned long *src = &flags->__vma_flags[1];

		bitmap_copy(dst, src, NUM_VMA_FLAG_BITS - BITS_PER_LONG);
	}
}

static inline void vm_flags_set(struct vm_area_struct *vma,
		vm_flags_t flags)
{
	vma_start_write(vma);
	vma_flags_set_word(&vma->flags, flags);
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
		vm_flags_t flags)
{
	vma_start_write(vma);
	vma_flags_clear_word(&vma->flags, flags);
}

static __always_inline vma_flags_t __mk_vma_flags(vma_flags_t flags,
		size_t count, const vma_flag_t *bits)
{
	int i;

	for (i = 0; i < count; i++)
		vma_flags_set_flag(&flags, bits[i]);
	return flags;
}

#define mk_vma_flags(...) __mk_vma_flags(EMPTY_VMA_FLAGS, \
		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})

#define append_vma_flags(flags, ...) __mk_vma_flags(flags, \
		COUNT_ARGS(__VA_ARGS__), (const vma_flag_t []){__VA_ARGS__})

static __always_inline int vma_flags_count(const vma_flags_t *flags)
{
	const unsigned long *bitmap = flags->__vma_flags;

	return bitmap_weight(bitmap, NUM_VMA_FLAG_BITS);
}

static __always_inline bool vma_flags_test(const vma_flags_t *flags,
		vma_flag_t bit)
{
	const unsigned long *bitmap = flags->__vma_flags;

	return test_bit((__force int)bit, bitmap);
}

static __always_inline vma_flags_t vma_flags_and_mask(const vma_flags_t *flags,
		vma_flags_t to_and)
{
	vma_flags_t dst;
	unsigned long *bitmap_dst = dst.__vma_flags;
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_and = to_and.__vma_flags;

	bitmap_and(bitmap_dst, bitmap, bitmap_to_and, NUM_VMA_FLAG_BITS);
	return dst;
}

#define vma_flags_and(flags, ...) \
	vma_flags_and_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_flags_test_any_mask(const vma_flags_t *flags,
		vma_flags_t to_test)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_test = to_test.__vma_flags;

	return bitmap_intersects(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}

#define vma_flags_test_any(flags, ...) \
	vma_flags_test_any_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_flags_test_all_mask(const vma_flags_t *flags,
		vma_flags_t to_test)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_test = to_test.__vma_flags;

	return bitmap_subset(bitmap_to_test, bitmap, NUM_VMA_FLAG_BITS);
}

#define vma_flags_test_all(flags, ...) \
	vma_flags_test_all_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_flags_test_single_mask(const vma_flags_t *flags,
		vma_flags_t flagmask)
{
	VM_WARN_ON_ONCE(vma_flags_count(&flagmask) > 1);

	return vma_flags_test_any_mask(flags, flagmask);
}

static __always_inline void vma_flags_set_mask(vma_flags_t *flags, vma_flags_t to_set)
{
	unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_set = to_set.__vma_flags;

	bitmap_or(bitmap, bitmap, bitmap_to_set, NUM_VMA_FLAG_BITS);
}

#define vma_flags_set(flags, ...) \
	vma_flags_set_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_flags_clear_mask(vma_flags_t *flags, vma_flags_t to_clear)
{
	unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_to_clear = to_clear.__vma_flags;

	bitmap_andnot(bitmap, bitmap, bitmap_to_clear, NUM_VMA_FLAG_BITS);
}

#define vma_flags_clear(flags, ...) \
	vma_flags_clear_mask(flags, mk_vma_flags(__VA_ARGS__))
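
/*
 * Illustrative sketch: the mask helpers compose, e.g. dropping all mlock
 * state from a flag set and asserting that none of it remains.
 */
static inline void example_drop_mlock_state(vma_flags_t *flags)
{
	vma_flags_clear(flags, VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT);
	VM_WARN_ON_ONCE(vma_flags_test_any(flags, VMA_LOCKED_BIT,
					   VMA_LOCKONFAULT_BIT));
}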

static __always_inline vma_flags_t vma_flags_diff_pair(const vma_flags_t *flags,
		const vma_flags_t *flags_other)
{
	vma_flags_t dst;
	const unsigned long *bitmap_other = flags_other->__vma_flags;
	const unsigned long *bitmap = flags->__vma_flags;
	unsigned long *bitmap_dst = dst.__vma_flags;

	bitmap_xor(bitmap_dst, bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
	return dst;
}

static __always_inline bool vma_flags_same_pair(const vma_flags_t *flags,
		const vma_flags_t *flags_other)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_other = flags_other->__vma_flags;

	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
}

static __always_inline bool vma_flags_same_mask(const vma_flags_t *flags,
		vma_flags_t flags_other)
{
	const unsigned long *bitmap = flags->__vma_flags;
	const unsigned long *bitmap_other = flags_other.__vma_flags;

	return bitmap_equal(bitmap, bitmap_other, NUM_VMA_FLAG_BITS);
}

#define vma_flags_same(flags, ...) \
	vma_flags_same_mask(flags, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_test(const struct vm_area_struct *vma,
		vma_flag_t bit)
{
	return vma_flags_test(&vma->flags, bit);
}

static __always_inline bool vma_test_any_mask(const struct vm_area_struct *vma,
		vma_flags_t flags)
{
	return vma_flags_test_any_mask(&vma->flags, flags);
}

#define vma_test_any(vma, ...) \
	vma_test_any_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_test_all_mask(const struct vm_area_struct *vma,
		vma_flags_t flags)
{
	return vma_flags_test_all_mask(&vma->flags, flags);
}

#define vma_test_all(vma, ...) \
	vma_test_all_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline bool
vma_test_single_mask(const struct vm_area_struct *vma, vma_flags_t flagmask)
{
	return vma_flags_test_single_mask(&vma->flags, flagmask);
}

static __always_inline void vma_set_flags_mask(struct vm_area_struct *vma,
		vma_flags_t flags)
{
	vma_flags_set_mask(&vma->flags, flags);
}

#define vma_set_flags(vma, ...) \
	vma_set_flags_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_clear_flags_mask(struct vm_area_struct *vma,
		vma_flags_t flags)
{
	vma_flags_clear_mask(&vma->flags, flags);
}

#define vma_clear_flags(vma, ...) \
	vma_clear_flags_mask(vma, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_desc_test(const struct vm_area_desc *desc,
		vma_flag_t bit)
{
	return vma_flags_test(&desc->vma_flags, bit);
}

static __always_inline bool vma_desc_test_any_mask(const struct vm_area_desc *desc,
		vma_flags_t flags)
{
	return vma_flags_test_any_mask(&desc->vma_flags, flags);
}

#define vma_desc_test_any(desc, ...) \
	vma_desc_test_any_mask(desc, mk_vma_flags(__VA_ARGS__))

static __always_inline bool vma_desc_test_all_mask(const struct vm_area_desc *desc,
		vma_flags_t flags)
{
	return vma_flags_test_all_mask(&desc->vma_flags, flags);
}

#define vma_desc_test_all(desc, ...) \
	vma_desc_test_all_mask(desc, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
		vma_flags_t flags)
{
	vma_flags_set_mask(&desc->vma_flags, flags);
}

#define vma_desc_set_flags(desc, ...) \
	vma_desc_set_flags_mask(desc, mk_vma_flags(__VA_ARGS__))

static __always_inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
		vma_flags_t flags)
{
	vma_flags_clear_mask(&desc->vma_flags, flags);
}

#define vma_desc_clear_flags(desc, ...) \
	vma_desc_clear_flags_mask(desc, mk_vma_flags(__VA_ARGS__))

static inline bool is_shared_maywrite(const vma_flags_t *flags)
{
	return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(&vma->flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}
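
/*
 * Illustrative sketch: walking every VMA in an mm using the iterator and
 * for_each_vma() defined above. Purely an API usage example.
 */
static inline int example_count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int count = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		count++;
	return count;
}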

/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_detached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_detached(vma);
	refcount_set_release(&vma->vm_refcnt, 1);
}

static inline void vma_mark_detached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_attached(vma);
	/* We are the only writer, so no need to use vma_refcount_put(). */
	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
		/*
		 * Reader must have temporarily raised vm_refcnt but it will
		 * drop it without using the vma since vma is write-locked.
		 */
	}
}

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_lock_seq = UINT_MAX;
}

/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */
#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)
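
/*
 * For example (per the masks above): VM_READ | VM_EXEC counts as an exec
 * mapping, VM_READ | VM_WRITE as a data mapping, and anything with VM_STACK
 * (or VM_SHADOW_STACK) set as a stack mapping.
 */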

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
		long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) + npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
		unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

/* Declared in vma.h. */
static inline void compat_set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc);

static inline void compat_set_desc_from_vma(struct vm_area_desc *desc,
		const struct file *file,
		const struct vm_area_struct *vma)
{
	memset(desc, 0, sizeof(*desc));

	desc->mm = vma->vm_mm;
	desc->file = (struct file *)file;
	desc->start = vma->vm_start;
	desc->end = vma->vm_end;

	desc->pgoff = vma->vm_pgoff;
	desc->vm_file = vma->vm_file;
	desc->vma_flags = vma->flags;
	desc->page_prot = vma->vm_page_prot;

	/* Default. */
	desc->action.type = MMAP_NOTHING;
}

static inline unsigned long vma_pages(const struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
	return file->f_op->mmap_prepare(desc);
}

static inline int __compat_vma_mmap(struct vm_area_desc *desc,
		struct vm_area_struct *vma)
{
	int err;

	/* Perform any preparatory tasks for the mmap action. */
	err = mmap_action_prepare(desc);
	if (err)
		return err;
	/* Update the VMA from the descriptor. */
	compat_set_vma_from_desc(vma, desc);
	/* Complete any specified mmap actions. */
	return mmap_action_complete(vma, &desc->action, /*is_compat=*/true);
}

static inline int compat_vma_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vm_area_desc desc;
	struct mmap_action *action;
	int err;

	compat_set_desc_from_vma(&desc, file, vma);
	err = vfs_mmap_prepare(file, &desc);
	if (err)
		return err;
	action = &desc.action;

	/* Being invoked from .mmap means we don't have to enforce this. */
	action->hide_from_rmap_until_complete = false;

	return __compat_vma_mmap(&desc, vma);
}

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
		unsigned long start_addr,
		unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

#undef vma_iter_load

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));

	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;

	/* See reasoning around the VM_SHADOW_STACK definition */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)
		vm_start = 0;
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}
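
/*
 * Worked example: for a VM_GROWSDOWN stack with vm_start = 0x7f0000100000 and
 * stack_guard_gap at its default of 256 pages (1 MiB with 4 KiB pages),
 * vm_start_gap() returns 0x7f0000000000, reserving the guard region below
 * the stack.
 */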

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool mlock_future_ok(const struct mm_struct *mm,
		vm_flags_t vm_flags, unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}
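
/*
 * Worked example: with RLIMIT_MEMLOCK of 64 KiB and 4 KiB pages, limit_pages
 * is 16; a request whose pages plus mm->locked_vm exceed 16 fails unless the
 * caller has CAP_IPC_LOCK.
 */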

static inline int mapping_map_writable(struct address_space *mapping)
{
	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
		0 : -EPERM;
}

/* Did the driver provide a valid mmap hook configuration? */
static inline bool can_mmap_file(struct file *file)
{
	bool has_mmap = file->f_op->mmap;
	bool has_mmap_prepare = file->f_op->mmap_prepare;

	/* Hooks are mutually exclusive. */
	if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
		return false;
	if (!has_mmap && !has_mmap_prepare)
		return false;

	return true;
}

static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (file->f_op->mmap_prepare)
		return compat_vma_mmap(file, vma);

	return file->f_op->mmap(file, vma);
}

static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}

extern int sysctl_max_map_count;
static inline int get_sysctl_max_map_count(void)
{
	return READ_ONCE(sysctl_max_map_count);
}

#ifndef pgtable_supports_soft_dirty
#define pgtable_supports_soft_dirty() IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)
#endif

static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
{
	const vm_flags_t vm_flags = vma_flags_to_legacy(vma_flags);

	return vm_get_page_prot(vm_flags);
}