// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
 * Copyright (C) 2024 Mike Rapoport IBM.
 */

#define pr_fmt(fmt) "execmem: " fmt

#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/execmem.h>
#include <linux/maple_tree.h>
#include <linux/set_memory.h>
#include <linux/moduleloader.h>
#include <linux/text-patching.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct execmem_info *execmem_info __ro_after_init;
static struct execmem_info default_execmem_info __ro_after_init;

#ifdef CONFIG_MMU
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
			     pgprot_t pgprot, unsigned long vm_flags)
{
	bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_NOWARN;
	unsigned int align = range->alignment;
	unsigned long start = range->start;
	unsigned long end = range->end;
	void *p;

	if (kasan)
		vm_flags |= VM_DEFER_KMEMLEAK;

	p = __vmalloc_node_range(size, align, start, end, gfp_flags,
				 pgprot, vm_flags, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (!p && range->fallback_start) {
		start = range->fallback_start;
		end = range->fallback_end;
		p = __vmalloc_node_range(size, align, start, end, gfp_flags,
					 pgprot, vm_flags, NUMA_NO_NODE,
					 __builtin_return_address(0));
	}

	if (!p) {
		pr_warn_ratelimited("unable to allocate memory\n");
		return NULL;
	}

	if (kasan && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
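
/*
 * Both execmem_vmalloc() above and execmem_vmap() below first try the
 * range's primary [start, end) window and, when that fails and a fallback
 * window is defined, retry in [fallback_start, fallback_end). This lets an
 * architecture express a preferred placement (for instance within branch
 * reach of the kernel image) while still being able to satisfy allocations
 * elsewhere once the preferred window is exhausted.
 */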

struct vm_struct *execmem_vmap(size_t size)
{
	struct execmem_range *range = &execmem_info->ranges[EXECMEM_MODULE_DATA];
	struct vm_struct *area;

	area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
				  range->start, range->end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
	if (!area && range->fallback_start)
		area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
					  range->fallback_start, range->fallback_end,
					  NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0));

	return area;
}
#else
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
			     pgprot_t pgprot, unsigned long vm_flags)
{
	return vmalloc(size);
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
struct execmem_cache {
	struct mutex mutex;
	struct maple_tree busy_areas;
	struct maple_tree free_areas;
	unsigned int pending_free_cnt;	/* protected by mutex */
};

/* delay to schedule asynchronous free if fast path free fails */
#define FREE_DELAY (msecs_to_jiffies(10))

/* mark entries in busy_areas that should be freed asynchronously */
#define PENDING_FREE_MASK (1 << (PAGE_SHIFT - 1))

static struct execmem_cache execmem_cache = {
	.mutex = __MUTEX_INITIALIZER(execmem_cache.mutex),
	.busy_areas = MTREE_INIT_EXT(busy_areas, MT_FLAGS_LOCK_EXTERN,
				     execmem_cache.mutex),
	.free_areas = MTREE_INIT_EXT(free_areas, MT_FLAGS_LOCK_EXTERN,
				     execmem_cache.mutex),
};
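
/*
 * The ROX cache tracks large read-only-executable allocations in two maple
 * trees indexed by address range: free_areas holds ranges available for
 * reuse and busy_areas holds ranges currently handed out to callers. Both
 * trees share execmem_cache.mutex as their external lock, so lookups and
 * updates of the two trees always happen under the same mutex.
 */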

static inline unsigned long mas_range_len(struct ma_state *mas)
{
	return mas->last - mas->index + 1;
}

static int execmem_set_direct_map_valid(struct vm_struct *vm, bool valid)
{
	unsigned int nr = (1 << get_vm_area_page_order(vm));
	unsigned int updated = 0;
	int err = 0;

	for (int i = 0; i < vm->nr_pages; i += nr) {
		err = set_direct_map_valid_noflush(vm->pages[i], nr, valid);
		if (err)
			goto err_restore;
		updated += nr;
	}

	return 0;

err_restore:
	for (int i = 0; i < updated; i += nr)
		set_direct_map_valid_noflush(vm->pages[i], nr, !valid);

	return err;
}

static int execmem_force_rw(void *ptr, size_t size)
{
	unsigned int nr = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)ptr;
	int ret;

	ret = set_memory_nx(addr, nr);
	if (ret)
		return ret;

	return set_memory_rw(addr, nr);
}

int execmem_restore_rox(void *ptr, size_t size)
{
	unsigned int nr = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)ptr;

	return set_memory_rox(addr, nr);
}

static void execmem_cache_clean(struct work_struct *work)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct mutex *mutex = &execmem_cache.mutex;
	MA_STATE(mas, free_areas, 0, ULONG_MAX);
	void *area;

	mutex_lock(mutex);
	mas_for_each(&mas, area, ULONG_MAX) {
		size_t size = mas_range_len(&mas);

		if (IS_ALIGNED(size, PMD_SIZE) &&
		    IS_ALIGNED(mas.index, PMD_SIZE)) {
			struct vm_struct *vm = find_vm_area(area);

			execmem_set_direct_map_valid(vm, true);
			mas_store_gfp(&mas, NULL, GFP_KERNEL);
			vfree(area);
		}
	}
	mutex_unlock(mutex);
}

static DECLARE_WORK(execmem_cache_clean_work, execmem_cache_clean);

static int execmem_cache_add_locked(void *ptr, size_t size, gfp_t gfp_mask)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	unsigned long addr = (unsigned long)ptr;
	MA_STATE(mas, free_areas, addr - 1, addr + 1);
	unsigned long lower, upper;
	void *area = NULL;

	lower = addr;
	upper = addr + size - 1;

	area = mas_walk(&mas);
	if (area && mas.last == addr - 1)
		lower = mas.index;

	area = mas_next(&mas, ULONG_MAX);
	if (area && mas.index == addr + size)
		upper = mas.last;

	mas_set_range(&mas, lower, upper);
	return mas_store_gfp(&mas, (void *)lower, gfp_mask);
}
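
/*
 * execmem_cache_add_locked() merges the newly freed range with immediately
 * adjacent free ranges before storing it. As an illustration (addresses are
 * hypothetical): if free_areas already holds [0x1000, 0x1fff] and the range
 * [0x2000, 0x2fff] is added, the walk at addr - 1 finds the lower neighbour
 * and a single entry covering [0x1000, 0x2fff] is stored instead of two.
 */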

static bool within_range(struct execmem_range *range, struct ma_state *mas,
			 size_t size)
{
	unsigned long addr = mas->index;

	if (addr >= range->start && addr + size < range->end)
		return true;

	if (range->fallback_start &&
	    addr >= range->fallback_start && addr + size < range->fallback_end)
		return true;

	return false;
}

static void *execmem_cache_alloc_locked(struct execmem_range *range, size_t size)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
	unsigned long addr, last, area_size = 0;
	void *area, *ptr = NULL;
	int err;

	mas_for_each(&mas_free, area, ULONG_MAX) {
		area_size = mas_range_len(&mas_free);

		if (area_size >= size && within_range(range, &mas_free, size))
			break;
	}

	if (area_size < size)
		return NULL;

	addr = mas_free.index;
	last = mas_free.last;

	/* insert allocated size to busy_areas at range [addr, addr + size) */
	mas_set_range(&mas_busy, addr, addr + size - 1);
	err = mas_store_gfp(&mas_busy, (void *)addr, GFP_KERNEL);
	if (err)
		return NULL;

	mas_store_gfp(&mas_free, NULL, GFP_KERNEL);
	if (area_size > size) {
		void *ptr = (void *)(addr + size);

		/*
		 * re-insert remaining free size to free_areas at range
		 * [addr + size, last]
		 */
		mas_set_range(&mas_free, addr + size, last);
		err = mas_store_gfp(&mas_free, ptr, GFP_KERNEL);
		if (err) {
			mas_store_gfp(&mas_busy, NULL, GFP_KERNEL);
			return NULL;
		}
	}
	ptr = (void *)addr;

	return ptr;
}
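
/*
 * execmem_cache_alloc_locked() is a simple first-fit allocator: the first
 * free range that is large enough and lies within the requested execmem
 * range (or its fallback) is taken. Its first @size bytes move to
 * busy_areas and any remainder is re-inserted into free_areas; if that
 * re-insertion fails, the busy entry is rolled back so the two trees stay
 * consistent.
 */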

static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	guard(mutex)(&execmem_cache.mutex);

	return execmem_cache_alloc_locked(range, size);
}

static void *execmem_cache_populate_alloc(struct execmem_range *range, size_t size)
{
	unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
	struct mutex *mutex = &execmem_cache.mutex;
	struct vm_struct *vm;
	size_t alloc_size;
	int err = -ENOMEM;
	void *p;

	alloc_size = round_up(size, PMD_SIZE);
	p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);
	if (!p) {
		alloc_size = size;
		p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);
	}

	if (!p)
		return NULL;

	vm = find_vm_area(p);
	if (!vm)
		goto err_free_mem;

	/* fill memory with instructions that will trap */
	execmem_fill_trapping_insns(p, alloc_size);

	err = set_memory_rox((unsigned long)p, vm->nr_pages);
	if (err)
		goto err_free_mem;

	/*
	 * New memory blocks must be allocated and added to the cache
	 * as an atomic operation, otherwise they may be consumed
	 * by a parallel call to the execmem_cache_alloc function.
	 */
	mutex_lock(mutex);
	err = execmem_cache_add_locked(p, alloc_size, GFP_KERNEL);
	if (err)
		goto err_reset_direct_map;

	p = execmem_cache_alloc_locked(range, size);

	mutex_unlock(mutex);

	return p;

err_reset_direct_map:
	mutex_unlock(mutex);
	execmem_set_direct_map_valid(vm, true);
err_free_mem:
	vfree(p);
	return NULL;
}
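
/*
 * The populate path grows the cache when no cached range can satisfy a
 * request: it prefers an allocation rounded up to PMD_SIZE (so it can be
 * mapped with huge pages), falls back to the exact size, fills the new
 * block with trapping instructions, seals it read-only-executable, and
 * only then adds it to free_areas and carves out the caller's piece under
 * the mutex.
 */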

static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	void *p;

	p = __execmem_cache_alloc(range, size);
	if (p)
		return p;

	return execmem_cache_populate_alloc(range, size);
}

static inline bool is_pending_free(void *ptr)
{
	return ((unsigned long)ptr & PENDING_FREE_MASK);
}

static inline void *pending_free_set(void *ptr)
{
	return (void *)((unsigned long)ptr | PENDING_FREE_MASK);
}

static inline void *pending_free_clear(void *ptr)
{
	return (void *)((unsigned long)ptr & ~PENDING_FREE_MASK);
}
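
/*
 * busy_areas entries store the page-aligned start address of an area, so
 * bit PAGE_SHIFT - 1 of the stored value is always clear. The helpers
 * above reuse that bit (PENDING_FREE_MASK) to tag entries whose release
 * failed on the fast path and must be retried by the deferred worker.
 */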

static int __execmem_cache_free(struct ma_state *mas, void *ptr, gfp_t gfp_mask)
{
	size_t size = mas_range_len(mas);
	int err;

	err = execmem_force_rw(ptr, size);
	if (err)
		return err;

	execmem_fill_trapping_insns(ptr, size);
	execmem_restore_rox(ptr, size);

	err = execmem_cache_add_locked(ptr, size, gfp_mask);
	if (err)
		return err;

	mas_store_gfp(mas, NULL, gfp_mask);
	return 0;
}

static void execmem_cache_free_slow(struct work_struct *work);
static DECLARE_DELAYED_WORK(execmem_cache_free_work, execmem_cache_free_slow);

static void execmem_cache_free_slow(struct work_struct *work)
{
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	MA_STATE(mas, busy_areas, 0, ULONG_MAX);
	void *area;

	guard(mutex)(&execmem_cache.mutex);

	if (!execmem_cache.pending_free_cnt)
		return;

	mas_for_each(&mas, area, ULONG_MAX) {
		if (!is_pending_free(area))
			continue;

		area = pending_free_clear(area);
		if (__execmem_cache_free(&mas, area, GFP_KERNEL))
			continue;

		execmem_cache.pending_free_cnt--;
	}

	if (execmem_cache.pending_free_cnt)
		schedule_delayed_work(&execmem_cache_free_work, FREE_DELAY);
	else
		schedule_work(&execmem_cache_clean_work);
}

static bool execmem_cache_free(void *ptr)
{
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	unsigned long addr = (unsigned long)ptr;
	MA_STATE(mas, busy_areas, addr, addr);
	void *area;
	int err;

	guard(mutex)(&execmem_cache.mutex);

	area = mas_walk(&mas);
	if (!area)
		return false;

	err = __execmem_cache_free(&mas, area, GFP_KERNEL | __GFP_NORETRY);
	if (err) {
		/*
		 * mas points to exact slot we've got the area from, nothing
		 * else can modify the tree because of the mutex, so there
		 * won't be any allocations in mas_store_gfp() and it will just
		 * change the pointer.
		 */
		area = pending_free_set(area);
		mas_store_gfp(&mas, area, GFP_KERNEL);
		execmem_cache.pending_free_cnt++;
		schedule_delayed_work(&execmem_cache_free_work, FREE_DELAY);
		return true;
	}

	schedule_work(&execmem_cache_clean_work);

	return true;
}

#else /* CONFIG_ARCH_HAS_EXECMEM_ROX */
/*
 * When the ROX cache is not used, the permissions an architecture defines
 * for execmem ranges that are updated before use (e.g. EXECMEM_MODULE_TEXT)
 * must already be writable, so forcing them read-write is a no-op.
 */
static inline int execmem_force_rw(void *ptr, size_t size)
{
	return 0;
}

static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	return NULL;
}

static bool execmem_cache_free(void *ptr)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_EXECMEM_ROX */

void *execmem_alloc(enum execmem_type type, size_t size)
{
	struct execmem_range *range = &execmem_info->ranges[type];
	bool use_cache = range->flags & EXECMEM_ROX_CACHE;
	unsigned long vm_flags = VM_FLUSH_RESET_PERMS;
	pgprot_t pgprot = range->pgprot;
	void *p = NULL;

	size = PAGE_ALIGN(size);

	if (use_cache)
		p = execmem_cache_alloc(range, size);
	else
		p = execmem_vmalloc(range, size, pgprot, vm_flags);

	return kasan_reset_tag(p);
}

void *execmem_alloc_rw(enum execmem_type type, size_t size)
{
	void *p __free(execmem) = execmem_alloc(type, size);
	int err;

	if (!p)
		return NULL;

	err = execmem_force_rw(p, size);
	if (err)
		return NULL;

	return no_free_ptr(p);
}
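
/*
 * Illustrative caller flow (hypothetical, not taken from an in-tree user):
 * allocate writable memory for code, patch it, seal it back to ROX before
 * it is executed, and release it with execmem_free() when done:
 *
 *	void *code = execmem_alloc_rw(EXECMEM_MODULE_TEXT, len);
 *
 *	if (!code)
 *		return -ENOMEM;
 *	memcpy(code, insns, len);
 *	execmem_restore_rox(code, len);
 *	...
 *	execmem_free(code);
 */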

void execmem_free(void *ptr)
{
	/*
	 * This memory may be RO, and freeing RO memory in an interrupt is not
	 * supported by vmalloc.
	 */
	WARN_ON(in_interrupt());

	if (!execmem_cache_free(ptr))
		vfree(ptr);
}

bool execmem_is_rox(enum execmem_type type)
{
	return !!(execmem_info->ranges[type].flags & EXECMEM_ROX_CACHE);
}

static bool execmem_validate(struct execmem_info *info)
{
	struct execmem_range *r = &info->ranges[EXECMEM_DEFAULT];

	if (!r->alignment || !r->start || !r->end || !pgprot_val(r->pgprot)) {
		pr_crit("Invalid parameters for execmem allocator, module loading will fail");
		return false;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX)) {
		for (int i = EXECMEM_DEFAULT; i < EXECMEM_TYPE_MAX; i++) {
			r = &info->ranges[i];

			if (r->flags & EXECMEM_ROX_CACHE) {
				pr_warn_once("ROX cache is not supported\n");
				r->flags &= ~EXECMEM_ROX_CACHE;
			}
		}
	}

	return true;
}

static void execmem_init_missing(struct execmem_info *info)
{
	struct execmem_range *default_range = &info->ranges[EXECMEM_DEFAULT];

	for (int i = EXECMEM_DEFAULT + 1; i < EXECMEM_TYPE_MAX; i++) {
		struct execmem_range *r = &info->ranges[i];

		if (!r->start) {
			if (i == EXECMEM_MODULE_DATA)
				r->pgprot = PAGE_KERNEL;
			else
				r->pgprot = default_range->pgprot;
			r->alignment = default_range->alignment;
			r->start = default_range->start;
			r->end = default_range->end;
			r->flags = default_range->flags;
			r->fallback_start = default_range->fallback_start;
			r->fallback_end = default_range->fallback_end;
		}
	}
}

struct execmem_info * __weak execmem_arch_setup(void)
{
	return NULL;
}
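
/*
 * Architectures that need execmem ranges different from the defaults
 * override the __weak execmem_arch_setup() above and return their own
 * execmem_info. When no override exists, __execmem_init() below falls back
 * to a single EXECMEM_DEFAULT range covering [VMALLOC_START, VMALLOC_END)
 * with PAGE_KERNEL_EXEC permissions.
 */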

static void __init __execmem_init(void)
{
	struct execmem_info *info = execmem_arch_setup();

	if (!info) {
		info = execmem_info = &default_execmem_info;
		info->ranges[EXECMEM_DEFAULT].start = VMALLOC_START;
		info->ranges[EXECMEM_DEFAULT].end = VMALLOC_END;
		info->ranges[EXECMEM_DEFAULT].pgprot = PAGE_KERNEL_EXEC;
		info->ranges[EXECMEM_DEFAULT].alignment = 1;
	}

	if (!execmem_validate(info))
		return;

	execmem_init_missing(info);

	execmem_info = info;
}

#ifdef CONFIG_ARCH_WANTS_EXECMEM_LATE
static int __init execmem_late_init(void)
{
	__execmem_init();
	return 0;
}
core_initcall(execmem_late_init);
#else
void __init execmem_init(void)
{
	__execmem_init();
}
#endif