// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/cpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
	struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8UL)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
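
/*
 * Worked example of the sizing above (illustrative only; assumes
 * CONFIG_FRAME_WARN=2048 and a 16 KiB THREAD_SIZE, typical x86_64 values):
 *	REC_STACK_SIZE  = 2048 / 2           = 1024 bytes per frame
 *	REC_NUM_DEFAULT = (16384 / 1024) * 2 = 32 recursions
 * i.e. recursive_loop() attempts to consume twice the stack by default.
 */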

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (buf[..] passed as arg)
 * - function may have external effects (memzero_explicit())
 * - no tail recursion possible
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];
	volatile int ret;

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	if (!remaining)
		ret = 0;
	else
		ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
	memzero_explicit((void *)buf, sizeof(buf));
	return ret;
}

/* If the depth is negative, use the default, otherwise keep parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

static void lkdtm_PANIC(void)
{
	panic("dumptest");
}

static int panic_stop_irqoff_fn(void *arg)
{
	atomic_t *v = arg;

	/*
	 * As stop_machine() disables interrupts, all CPUs within this function
	 * have interrupts disabled and cannot take a regular IPI.
	 *
	 * The last CPU which enters here will trigger a panic, and as all CPUs
	 * cannot take a regular IPI, we'll only be able to stop secondaries if
	 * smp_send_stop() or crash_smp_send_stop() uses an NMI.
	 */
	if (atomic_inc_return(v) == num_online_cpus())
		panic("panic stop irqoff test");

	for (;;)
		cpu_relax();
}

static void lkdtm_PANIC_STOP_IRQOFF(void)
{
	atomic_t v = ATOMIC_INIT(0);

	stop_machine(panic_stop_irqoff_fn, &v, cpu_online_mask);
}
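
/*
 * Minimal stop_machine() usage sketch (illustrative only, not part of the
 * test; the helper below is hypothetical): the callback runs on every CPU
 * in the mask with interrupts disabled, which is exactly the property
 * panic_stop_irqoff_fn() relies on.
 */
static int __maybe_unused example_irqoff_fn(void *unused)
{
	/* Interrupts are off here on every participating CPU. */
	WARN_ON(!irqs_disabled());
	return 0;
}
/* Usage: stop_machine(example_irqoff_fn, NULL, cpu_online_mask); */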

static bool wait_for_panic;

static enum hrtimer_restart panic_in_hardirq(struct hrtimer *timer)
{
	panic("from hard IRQ context");

	wait_for_panic = false;
	return HRTIMER_NORESTART;
}

static void lkdtm_PANIC_IN_HARDIRQ(void)
{
	struct hrtimer timer;

	wait_for_panic = true;
	hrtimer_setup_on_stack(&timer, panic_in_hardirq,
			       CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer_start(&timer, us_to_ktime(100), HRTIMER_MODE_REL_HARD);

	while (READ_ONCE(wait_for_panic))
		cpu_relax();

	hrtimer_cancel(&timer);
}

static void lkdtm_BUG(void)
{
	BUG();
}

static bool wait_for_bug;

static enum hrtimer_restart bug_in_hardirq(struct hrtimer *timer)
{
	BUG();

	wait_for_bug = false;
	return HRTIMER_NORESTART;
}

static void lkdtm_BUG_IN_HARDIRQ(void)
{
	struct hrtimer timer;

	wait_for_bug = true;
	hrtimer_setup_on_stack(&timer, bug_in_hardirq,
			       CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer_start(&timer, us_to_ktime(100), HRTIMER_MODE_REL_HARD);

	while (READ_ONCE(wait_for_bug))
		cpu_relax();

	hrtimer_cancel(&timer);
}

static int warn_counter;

static void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

static void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

static void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

static void lkdtm_LOOP(void)
{
	for (;;)
		;
}

static void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
static noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}
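
/*
 * Why the 64-byte memset above trips the canary (illustrative layout; the
 * exact frame arrangement is compiler- and arch-dependent):
 *
 *	[ data[0..7] ][ canary ][ saved registers ][ return address ]
 *
 * With stack protection enabled, the compiler places the canary between
 * local buffers and the saved return state, so overflowing data[]
 * rewrites the canary and the check at function exit fires.
 */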

/* Same as above but will only get a canary with -fstack-protector-strong */
static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

static pid_t stack_pid;
static unsigned long stack_addr;

static void lkdtm_REPORT_STACK(void)
{
	volatile uintptr_t magic;
	pid_t pid = task_pid_nr(current);

	if (pid != stack_pid) {
		pr_info("Starting stack offset tracking for pid %d\n", pid);
		stack_pid = pid;
		stack_addr = (uintptr_t)&magic;
	}

	pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}
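
/*
 * Illustrative expectation (not enforced by the test): with
 * CONFIG_RANDOMIZE_KSTACK_OFFSET active, repeated REPORT_STACK calls from
 * the same process should report varying non-zero offsets; with it
 * disabled, the reported offset stays 0.
 */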

static pid_t stack_canary_pid;
static unsigned long stack_canary;
static unsigned long stack_canary_offset;

static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
{
	int i = 0;
	pid_t pid = task_pid_nr(current);
	unsigned long *canary = (unsigned long *)stack;
	unsigned long current_offset = 0, init_offset = 0;

	/* Do our best to find the canary in a 16 word window ... */
	for (i = 1; i < 16; i++) {
		canary = (unsigned long *)stack + i;
#ifdef CONFIG_STACKPROTECTOR
		if (*canary == current->stack_canary)
			current_offset = i;
		if (*canary == init_task.stack_canary)
			init_offset = i;
#endif
	}

	if (current_offset == 0) {
		/*
		 * If the canary doesn't match what's in the task_struct,
		 * we're either using a global canary or the stack frame
		 * layout changed.
		 */
		if (init_offset != 0) {
			pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
			       init_offset, pid);
		} else {
			pr_warn("FAIL: did not correctly locate stack canary :(\n");
			pr_expected_config(CONFIG_STACKPROTECTOR);
		}

		return;
	} else if (init_offset != 0) {
		pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
	}

	canary = (unsigned long *)stack + current_offset;
	if (stack_canary_pid == 0) {
		stack_canary = *canary;
		stack_canary_pid = pid;
		stack_canary_offset = current_offset;
		pr_info("Recorded stack canary for pid %d at offset %ld\n",
			stack_canary_pid, stack_canary_offset);
	} else if (pid == stack_canary_pid) {
		pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
	} else {
		if (current_offset != stack_canary_offset) {
			pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
				stack_canary_offset, current_offset);
			return;
		}

		if (*canary == stack_canary) {
			pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
				stack_canary_pid, pid, current_offset);
		} else {
			pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
				stack_canary_pid, pid, current_offset);
			/* Reset the test. */
			stack_canary_pid = 0;
		}
	}
}

static void lkdtm_REPORT_STACK_CANARY(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *)) = { };

	__lkdtm_REPORT_STACK_CANARY((void *)&data);
}

static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}
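
/*
 * For contrast, a hedged sketch of the portable way to do the same store
 * without faulting: put_unaligned() is the real kernel helper for this
 * (from <linux/unaligned.h>, which may need including here); the wrapper
 * function itself is hypothetical.
 */
static void __maybe_unused example_unaligned_store(u8 *base, u32 val)
{
	/* Safe on every arch, regardless of native unaligned support. */
	put_unaligned(val, (u32 *)(base + 1));
}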

static void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

static void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}
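
/*
 * Detection note: the preemption-off spin above is caught by the
 * softlockup watchdog (CONFIG_SOFTLOCKUP_DETECTOR), while the IRQs-off
 * spin needs the NMI-based hardlockup detector
 * (CONFIG_HARDLOCKUP_DETECTOR), since no timer interrupt can run.
 */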

static void __lkdtm_SMP_CALL_LOCKUP(void *unused)
{
	for (;;)
		cpu_relax();
}

static void lkdtm_SMP_CALL_LOCKUP(void)
{
	unsigned int cpu, target;

	cpus_read_lock();

	cpu = get_cpu();
	target = cpumask_any_but(cpu_online_mask, cpu);

	if (target >= nr_cpu_ids) {
		pr_err("FAIL: no other online CPUs\n");
		goto out_put_cpus;
	}

	smp_call_function_single(target, __lkdtm_SMP_CALL_LOCKUP, NULL, 1);

	pr_err("FAIL: did not hang\n");

out_put_cpus:
	put_cpu();
	cpus_read_unlock();
}

static void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}
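
/*
 * Example trigger sequence (hypothetical shell session; LKDTM tests are
 * driven through debugfs):
 *
 *	# echo SPINLOCKUP > /sys/kernel/debug/provoke-crash/DIRECT
 *	# echo SPINLOCKUP > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * The first write returns while still holding lock_me_up; the second
 * spins on it forever (or trips the spinlock debugging checks, if those
 * are enabled).
 */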

static void __noreturn lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
	BUG();
}

static volatile unsigned int huge = INT_MAX - 2;
static volatile unsigned int ignored;

static void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}

static void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}
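
/*
 * A hedged sketch of catching such overflows explicitly rather than via
 * the sanitizers: check_add_overflow() is the real helper from
 * <linux/overflow.h> (which may need including here); the wrapper below
 * is hypothetical.
 */
static bool __maybe_unused example_checked_add(unsigned int a, unsigned int b,
					       unsigned int *sum)
{
	/* Returns true (leaving *sum wrapped) if a + b overflows. */
	return check_add_overflow(a, b, sum);
}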

/* Intentionally using unannotated flex array definition. */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};

static void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
	if (!not_checked || !checked) {
		kfree(not_checked);
		kfree(checked);
		return;
	}

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < 2; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
	if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}

struct lkdtm_cb_fam {
	unsigned long flags;
	int count;
	int array[] __counted_by(count);
};

static volatile int element_count = 4;

static void lkdtm_FAM_BOUNDS(void)
{
	struct lkdtm_cb_fam *inst;

	inst = kzalloc_flex(*inst, array, element_count + 1);
	if (!inst) {
		pr_err("FAIL: could not allocate test struct!\n");
		return;
	}

	inst->count = element_count;
	pr_info("Array access within bounds ...\n");
	inst->array[1] = element_count;
	ignored = inst->array[1];

	pr_info("Array access beyond bounds ...\n");
	inst->array[element_count] = element_count;
	ignored = inst->array[element_count];

	kfree(inst);

	pr_err("FAIL: survived access of invalid flexible array member index!\n");

	if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY))
		pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n",
			lkdtm_kernel_info);
	else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}
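
/*
 * Conceptual sketch of what __counted_by(count) buys us above: the
 * compiler can bound inst->array by inst->count, roughly as if every
 * access were preceded by
 *
 *	if (index >= inst->count)
 *		<trap or report, depending on CONFIG_UBSAN_TRAP>;
 *
 * so inst->array[element_count] traps once count == element_count, even
 * though the allocation itself has room for element_count + 1 entries.
 */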

struct lkdtm_extra {
	short a, b;
	u16 sixteen;
	u32 bigger;
	u64 biggest;
};

struct lkdtm_cb_ptr {
	int a, b, c;
	int nr_extra;
	char *buf __counted_by_ptr(len);
	size_t len;
	struct lkdtm_extra *extra __counted_by_ptr(nr_extra);
};

static noinline void check_ptr_len(struct lkdtm_cb_ptr *p, size_t len)
{
	if (__member_size(p->buf) != len)
		pr_err("FAIL: could not determine size of inst->buf: %zu\n",
		       __member_size(p->buf));
	else
		pr_info("good: inst->buf length is %zu\n", len);
}

static void lkdtm_PTR_BOUNDS(void)
{
	struct lkdtm_cb_ptr *inst;

	inst = kzalloc_obj(*inst);
	if (!inst) {
		pr_err("FAIL: could not allocate struct lkdtm_cb_ptr!\n");
		return;
	}

	inst->buf = kzalloc(element_count, GFP_KERNEL);
	if (!inst->buf) {
		pr_err("FAIL: could not allocate inst->buf!\n");
		kfree(inst);
		return;
	}
	inst->len = element_count;

	/* Double element_count */
	inst->extra = kzalloc_objs(*inst->extra, element_count * 2);
	if (!inst->extra) {
		kfree(inst->buf);
		kfree(inst);
		return;
	}
	inst->nr_extra = element_count * 2;

	pr_info("Pointer access within bounds ...\n");
	check_ptr_len(inst, 4);
	/* All 4 bytes */
	inst->buf[0] = 'A';
	inst->buf[1] = 'B';
	inst->buf[2] = 'C';
	inst->buf[3] = 'D';
	/* Halfway into the array */
	inst->extra[element_count].biggest = 0x1000;

	pr_info("Pointer access beyond bounds ...\n");
	ignored = inst->extra[inst->nr_extra].b;

	kfree(inst->extra);
	kfree(inst->buf);
	kfree(inst);

	pr_err("FAIL: survived access of invalid pointer member offset!\n");

	if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY_PTR))
		pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by_ptr\n",
			lkdtm_kernel_info);
	else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}

static void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL) {
		pr_err("Overwrite did not happen, but no BUG?!\n");
	} else {
		pr_err("list_add() corruption not detected!\n");
		pr_expected_config(CONFIG_LIST_HARDENED);
	}
}
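
/*
 * Worked example of the corrupted list_add() above: with test_head.next
 * aimed at target[], __list_add(&bad.node, &test_head, test_head.next)
 * performs (unless CONFIG_LIST_HARDENED rejects the inconsistent nodes):
 *
 *	next->prev = new;	// i.e. target[1] = &bad.node -- the overwrite
 *	new->next = next;
 *	new->prev = head;
 *	head->next = new;
 *
 * which is why the check above looks for a target[] entry going non-NULL.
 */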

static void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL) {
		pr_err("Overwrite did not happen, but no BUG?!\n");
	} else {
		pr_err("list_del() corruption not detected!\n");
		pr_expected_config(CONFIG_LIST_HARDENED);
	}
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
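
/*
 * Illustrative layout the two guard-page tests probe (details are
 * arch-specific):
 *
 *	[ guard page ][ THREAD_SIZE stack pages ][ guard page ]
 *
 * With CONFIG_VMAP_STACK, stacks come from vmalloc space, where the
 * off-stack reads above land in unmapped guard pages and fault, instead
 * of silently reading whatever neighbors a directly-mapped stack.
 */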

static void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	OPTIMIZER_HIDE_VAR(insn);
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}

static void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero. This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault. The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window. This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
		/* Reset the keys of current task */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

static noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * The PAC is a hash computed from the input keys, the return address,
	 * and the stack pointer. Since the PAC occupies only a few bits,
	 * collisions are possible, so iterate a few times to reduce the
	 * probability of one.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(PANIC),
	CRASHTYPE(PANIC_STOP_IRQOFF),
	CRASHTYPE(PANIC_IN_HARDIRQ),
	CRASHTYPE(BUG),
	CRASHTYPE(BUG_IN_HARDIRQ),
	CRASHTYPE(WARNING),
	CRASHTYPE(WARNING_MESSAGE),
	CRASHTYPE(EXCEPTION),
	CRASHTYPE(LOOP),
	CRASHTYPE(EXHAUST_STACK),
	CRASHTYPE(CORRUPT_STACK),
	CRASHTYPE(CORRUPT_STACK_STRONG),
	CRASHTYPE(REPORT_STACK),
	CRASHTYPE(REPORT_STACK_CANARY),
	CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
	CRASHTYPE(SOFTLOCKUP),
	CRASHTYPE(HARDLOCKUP),
	CRASHTYPE(SMP_CALL_LOCKUP),
	CRASHTYPE(SPINLOCKUP),
	CRASHTYPE(HUNG_TASK),
	CRASHTYPE(OVERFLOW_SIGNED),
	CRASHTYPE(OVERFLOW_UNSIGNED),
	CRASHTYPE(ARRAY_BOUNDS),
	CRASHTYPE(FAM_BOUNDS),
	CRASHTYPE(PTR_BOUNDS),
	CRASHTYPE(CORRUPT_LIST_ADD),
	CRASHTYPE(CORRUPT_LIST_DEL),
	CRASHTYPE(STACK_GUARD_PAGE_LEADING),
	CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
	CRASHTYPE(UNSET_SMEP),
	CRASHTYPE(DOUBLE_FAULT),
	CRASHTYPE(CORRUPT_PAC),
};

struct crashtype_category bugs_crashtypes = {
	.crashtypes = crashtypes,
	.len = ARRAY_SIZE(crashtypes),
};