Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2#include <test_util.h>
3#include <kvm_util.h>
4#include <processor.h>
5#include <linux/bitfield.h>
6
7#define MDSCR_KDE (1 << 13)
8#define MDSCR_MDE (1 << 15)
9#define MDSCR_SS (1 << 0)
10
11#define DBGBCR_LEN8 (0xff << 5)
12#define DBGBCR_EXEC (0x0 << 3)
13#define DBGBCR_EL1 (0x1 << 1)
14#define DBGBCR_E (0x1 << 0)
15#define DBGBCR_LBN_SHIFT 16
16#define DBGBCR_BT_SHIFT 20
17#define DBGBCR_BT_ADDR_LINK_CTX (0x1 << DBGBCR_BT_SHIFT)
18#define DBGBCR_BT_CTX_LINK (0x3 << DBGBCR_BT_SHIFT)
19
20#define DBGWCR_LEN8 (0xff << 5)
21#define DBGWCR_RD (0x1 << 3)
22#define DBGWCR_WR (0x2 << 3)
23#define DBGWCR_EL1 (0x1 << 1)
24#define DBGWCR_E (0x1 << 0)
25#define DBGWCR_LBN_SHIFT 16
26#define DBGWCR_WT_SHIFT 20
27#define DBGWCR_WT_LINK (0x1 << DBGWCR_WT_SHIFT)
28
29#define SPSR_D (1 << 9)
30#define SPSR_SS (1 << 21)
31
32extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start, hw_bp_ctx;
33extern unsigned char iter_ss_begin, iter_ss_end;
34static volatile u64 sw_bp_addr, hw_bp_addr;
35static volatile u64 wp_addr, wp_data_addr;
36static volatile u64 svc_addr;
37static volatile u64 ss_addr[4], ss_idx;
38#define PC(v) ((u64)&(v))
39
/*
 * Generate a write_<reg_name>(num, val) helper for an indexed debug
 * register file (breakpoint/watchpoint control and value registers).
 *
 * write_sysreg() requires the register name to be a literal token at
 * compile time, so the index cannot be computed; instead a switch
 * dispatches over all 16 architecturally possible register numbers.
 * An out-of-range index is a test bug and fails a guest assertion.
 */
#define GEN_DEBUG_WRITE_REG(reg_name)			\
static void write_##reg_name(int num, u64 val)		\
{							\
	switch (num) {					\
	case 0:						\
		write_sysreg(val, reg_name##0_el1);	\
		break;					\
	case 1:						\
		write_sysreg(val, reg_name##1_el1);	\
		break;					\
	case 2:						\
		write_sysreg(val, reg_name##2_el1);	\
		break;					\
	case 3:						\
		write_sysreg(val, reg_name##3_el1);	\
		break;					\
	case 4:						\
		write_sysreg(val, reg_name##4_el1);	\
		break;					\
	case 5:						\
		write_sysreg(val, reg_name##5_el1);	\
		break;					\
	case 6:						\
		write_sysreg(val, reg_name##6_el1);	\
		break;					\
	case 7:						\
		write_sysreg(val, reg_name##7_el1);	\
		break;					\
	case 8:						\
		write_sysreg(val, reg_name##8_el1);	\
		break;					\
	case 9:						\
		write_sysreg(val, reg_name##9_el1);	\
		break;					\
	case 10:					\
		write_sysreg(val, reg_name##10_el1);	\
		break;					\
	case 11:					\
		write_sysreg(val, reg_name##11_el1);	\
		break;					\
	case 12:					\
		write_sysreg(val, reg_name##12_el1);	\
		break;					\
	case 13:					\
		write_sysreg(val, reg_name##13_el1);	\
		break;					\
	case 14:					\
		write_sysreg(val, reg_name##14_el1);	\
		break;					\
	case 15:					\
		write_sysreg(val, reg_name##15_el1);	\
		break;					\
	default:					\
		GUEST_ASSERT(0);			\
	}						\
}
96
97/* Define write_dbgbcr()/write_dbgbvr()/write_dbgwcr()/write_dbgwvr() */
98GEN_DEBUG_WRITE_REG(dbgbcr)
99GEN_DEBUG_WRITE_REG(dbgbvr)
100GEN_DEBUG_WRITE_REG(dbgwcr)
101GEN_DEBUG_WRITE_REG(dbgwvr)
102
/*
 * Put the guest's debug state into a known-clean configuration:
 * debug exceptions masked, OS (double) lock released, MDSCR_EL1 and
 * CONTEXTIDR_EL1 zeroed, and every implemented breakpoint/watchpoint
 * control and value register cleared.
 */
static void reset_debug_state(void)
{
	u8 brps, wrps, i;
	u64 dfr0;

	/* Mask debug exceptions (PSTATE.D) while reconfiguring. */
	asm volatile("msr daifset, #8");

	/* Release the OS Double Lock and OS Lock so debug HW is usable. */
	write_sysreg(0, osdlr_el1);
	write_sysreg(0, oslar_el1);
	isb();

	write_sysreg(0, mdscr_el1);
	write_sysreg(0, contextidr_el1);

	/* Reset all bcr/bvr/wcr/wvr registers */
	dfr0 = read_sysreg(id_aa64dfr0_el1);
	/* BRPs/WRPs fields hold (count - 1), hence the inclusive loops. */
	brps = FIELD_GET(ID_AA64DFR0_EL1_BRPs, dfr0);
	for (i = 0; i <= brps; i++) {
		write_dbgbcr(i, 0);
		write_dbgbvr(i, 0);
	}
	wrps = FIELD_GET(ID_AA64DFR0_EL1_WRPs, dfr0);
	for (i = 0; i <= wrps; i++) {
		write_dbgwcr(i, 0);
		write_dbgwvr(i, 0);
	}

	isb();
}
132
/*
 * Set the OS Lock (OSLAR_EL1.OSLK), which should suppress hardware
 * breakpoints, watchpoints and single-step (but not BRK instructions).
 * Verify the lock actually took effect via OSLSR_EL1 bit 1 (OSLK).
 */
static void enable_os_lock(void)
{
	write_sysreg(1, oslar_el1);
	isb();

	GUEST_ASSERT(read_sysreg(oslsr_el1) & 2);
}
140
/*
 * Enable monitor debug exceptions: unmask them in PSTATE (clear the D
 * bit) and set MDSCR_EL1.{KDE,MDE} so breakpoints/watchpoints fire at
 * the current exception level.
 */
static void enable_monitor_debug_exceptions(void)
{
	u64 mdscr;

	/* Clear PSTATE.D so debug exceptions can be taken. */
	asm volatile("msr daifclr, #8");

	mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
	write_sysreg(mdscr, mdscr_el1);
	isb();
}
151
/*
 * Program watchpoint @wpn to trap both loads and stores to @addr
 * (8-byte span, per DBGWCR_LEN8), then enable monitor debug exceptions.
 */
static void install_wp(u8 wpn, u64 addr)
{
	u32 wcr;

	wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
	write_dbgwcr(wpn, wcr);
	write_dbgwvr(wpn, addr);

	isb();

	enable_monitor_debug_exceptions();
}
164
/*
 * Program hardware breakpoint @bpn to trap execution at @addr, then
 * enable monitor debug exceptions.
 */
static void install_hw_bp(u8 bpn, u64 addr)
{
	u32 bcr;

	bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
	write_dbgbcr(bpn, bcr);
	write_dbgbvr(bpn, addr);
	isb();

	enable_monitor_debug_exceptions();
}
176
/*
 * Program watchpoint @addr_wp on @addr, linked to context-aware
 * breakpoint @ctx_bp matching context ID @ctx, so the watchpoint only
 * fires while CONTEXTIDR_EL1 == @ctx.  Enables monitor debug exceptions.
 */
static void install_wp_ctx(u8 addr_wp, u8 ctx_bp, u64 addr, u64 ctx)
{
	u32 wcr;
	u64 ctx_bcr;

	/* Setup a context-aware breakpoint for Linked Context ID Match */
	ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
		  DBGBCR_BT_CTX_LINK;
	write_dbgbcr(ctx_bp, ctx_bcr);
	write_dbgbvr(ctx_bp, ctx);

	/* Setup a linked watchpoint (linked to the context-aware breakpoint) */
	wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E |
	      DBGWCR_WT_LINK | ((u32)ctx_bp << DBGWCR_LBN_SHIFT);
	write_dbgwcr(addr_wp, wcr);
	write_dbgwvr(addr_wp, addr);
	isb();

	enable_monitor_debug_exceptions();
}
197
198void install_hw_bp_ctx(u8 addr_bp, u8 ctx_bp, u64 addr, u64 ctx)
199{
200 u32 addr_bcr, ctx_bcr;
201
202 /* Setup a context-aware breakpoint for Linked Context ID Match */
203 ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
204 DBGBCR_BT_CTX_LINK;
205 write_dbgbcr(ctx_bp, ctx_bcr);
206 write_dbgbvr(ctx_bp, ctx);
207
208 /*
209 * Setup a normal breakpoint for Linked Address Match, and link it
210 * to the context-aware breakpoint.
211 */
212 addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
213 DBGBCR_BT_ADDR_LINK_CTX |
214 ((u32)ctx_bp << DBGBCR_LBN_SHIFT);
215 write_dbgbcr(addr_bp, addr_bcr);
216 write_dbgbvr(addr_bp, addr);
217 isb();
218
219 enable_monitor_debug_exceptions();
220}
221
/*
 * Enable single-step from within the guest: unmask debug exceptions
 * (clear PSTATE.D) and set MDSCR_EL1.{KDE,SS}.
 */
static void install_ss(void)
{
	u64 mdscr;

	/* Clear PSTATE.D so the step exceptions can be taken. */
	asm volatile("msr daifclr, #8");

	mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_SS;
	write_sysreg(mdscr, mdscr_el1);
	isb();
}
232
/* Data target for the watchpoint tests. */
static volatile char write_data;

/*
 * Guest entry point: run each debug-exception scenario in turn using
 * breakpoint #bpn, watchpoint #wpn and context-aware breakpoint
 * #ctx_bpn, and verify the addresses recorded by the exception
 * handlers against the labels planted in the inline asm.
 */
static void guest_code(u8 bpn, u8 wpn, u8 ctx_bpn)
{
	u64 ctx = 0xabcdef; /* a random context number */

	/* Software-breakpoint */
	reset_debug_state();
	asm volatile("sw_bp: brk #0");
	GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp));

	/* Hardware-breakpoint */
	reset_debug_state();
	install_hw_bp(bpn, PC(hw_bp));
	asm volatile("hw_bp: nop");
	GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp));

	/* Hardware-breakpoint + svc */
	reset_debug_state();
	install_hw_bp(bpn, PC(bp_svc));
	asm volatile("bp_svc: svc #0");
	GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_svc));
	/* The SVC handler records the preferred return address (svc + 4). */
	GUEST_ASSERT_EQ(svc_addr, PC(bp_svc) + 4);

	/* Hardware-breakpoint + software-breakpoint */
	reset_debug_state();
	install_hw_bp(bpn, PC(bp_brk));
	asm volatile("bp_brk: brk #0");
	GUEST_ASSERT_EQ(sw_bp_addr, PC(bp_brk));
	GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_brk));

	/* Watchpoint */
	reset_debug_state();
	install_wp(wpn, PC(write_data));
	write_data = 'x';
	GUEST_ASSERT_EQ(write_data, 'x');
	GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));

	/* Single-step */
	reset_debug_state();
	install_ss();
	ss_idx = 0;
	/*
	 * Three instructions are stepped; the final "msr daifset, #8"
	 * re-masks debug exceptions, ending the stepping.
	 */
	asm volatile("ss_start:\n"
		     "mrs x0, esr_el1\n"
		     "add x0, x0, #1\n"
		     "msr daifset, #8\n"
		     : : : "x0");
	GUEST_ASSERT_EQ(ss_addr[0], PC(ss_start));
	GUEST_ASSERT_EQ(ss_addr[1], PC(ss_start) + 4);
	GUEST_ASSERT_EQ(ss_addr[2], PC(ss_start) + 8);

	/* OS Lock does not block software-breakpoint */
	reset_debug_state();
	enable_os_lock();
	sw_bp_addr = 0;
	asm volatile("sw_bp2: brk #0");
	GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp2));

	/* OS Lock blocking hardware-breakpoint */
	reset_debug_state();
	enable_os_lock();
	install_hw_bp(bpn, PC(hw_bp2));
	hw_bp_addr = 0;
	asm volatile("hw_bp2: nop");
	GUEST_ASSERT_EQ(hw_bp_addr, 0);

	/* OS Lock blocking watchpoint */
	reset_debug_state();
	enable_os_lock();
	write_data = '\0';
	wp_data_addr = 0;
	install_wp(wpn, PC(write_data));
	write_data = 'x';
	GUEST_ASSERT_EQ(write_data, 'x');
	GUEST_ASSERT_EQ(wp_data_addr, 0);

	/* OS Lock blocking single-step */
	reset_debug_state();
	enable_os_lock();
	ss_addr[0] = 0;
	install_ss();
	ss_idx = 0;
	asm volatile("mrs x0, esr_el1\n\t"
		     "add x0, x0, #1\n\t"
		     "msr daifset, #8\n\t"
		     : : : "x0");
	GUEST_ASSERT_EQ(ss_addr[0], 0);

	/* Linked hardware-breakpoint */
	hw_bp_addr = 0;
	reset_debug_state();
	install_hw_bp_ctx(bpn, ctx_bpn, PC(hw_bp_ctx), ctx);
	/* Set context id */
	write_sysreg(ctx, contextidr_el1);
	isb();
	asm volatile("hw_bp_ctx: nop");
	write_sysreg(0, contextidr_el1);
	GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp_ctx));

	/* Linked watchpoint */
	reset_debug_state();
	install_wp_ctx(wpn, ctx_bpn, PC(write_data), ctx);
	/* Set context id */
	write_sysreg(ctx, contextidr_el1);
	isb();
	write_data = 'x';
	GUEST_ASSERT_EQ(write_data, 'x');
	GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));

	GUEST_DONE();
}
344
/* BRK (software breakpoint): record the PC and step over the BRK insn. */
static void guest_sw_bp_handler(struct ex_regs *regs)
{
	sw_bp_addr = regs->pc;
	regs->pc += 4;
}
350
/*
 * Hardware breakpoint: record the PC and set SPSR.D so debug exceptions
 * are masked on return, preventing the breakpoint from re-firing.
 */
static void guest_hw_bp_handler(struct ex_regs *regs)
{
	hw_bp_addr = regs->pc;
	regs->pstate |= SPSR_D;
}
356
/*
 * Watchpoint: record the faulting data address (FAR_EL1) and the
 * trapping PC, then set SPSR.D so the watchpoint does not re-fire
 * when the access is retried on return.
 */
static void guest_wp_handler(struct ex_regs *regs)
{
	wp_data_addr = read_sysreg(far_el1);
	wp_addr = regs->pc;
	regs->pstate |= SPSR_D;
}
363
/*
 * Software step: log the PC of each stepped instruction (at most 4,
 * the size of ss_addr[]) and set SPSR.SS so the next instruction
 * executes before the step exception is taken again.
 */
static void guest_ss_handler(struct ex_regs *regs)
{
	__GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%lu'", ss_idx);
	ss_addr[ss_idx++] = regs->pc;
	regs->pstate |= SPSR_SS;
}
370
/* SVC: record the return PC (the instruction after the SVC). */
static void guest_svc_handler(struct ex_regs *regs)
{
	svc_addr = regs->pc;
}
375
/*
 * Guest side of the userspace single-step test: each iteration signals
 * userspace (via a bare ucall) to enable single-step, then writes and
 * reads back dbgbvr0/dbgwvr0 between the iter_ss_begin/iter_ss_end
 * labels while userspace steps and checks the PC.
 */
static void guest_code_ss(int test_cnt)
{
	u64 i;
	u64 bvr, wvr, w_bvr, w_wvr;

	for (i = 0; i < test_cnt; i++) {
		/* Bits [1:0] of dbg{b,w}vr are RES0 */
		w_bvr = i << 2;
		w_wvr = i << 2;

		/*
		 * Enable Single Step execution. Note! This _must_ be a bare
		 * ucall as the ucall() path uses atomic operations to manage
		 * the ucall structures, and the built-in "atomics" are usually
		 * implemented via exclusive access instructions. The exclusive
		 * monitor is cleared on ERET, and so taking debug exceptions
		 * during a LDREX=>STREX sequence will prevent forward progress
		 * and hang the guest/test.
		 */
		GUEST_UCALL_NONE();

		/*
		 * The userspace will verify that the pc is as expected during
		 * single step execution between iter_ss_begin and iter_ss_end.
		 */
		asm volatile("iter_ss_begin:nop\n");

		write_sysreg(w_bvr, dbgbvr0_el1);
		write_sysreg(w_wvr, dbgwvr0_el1);
		bvr = read_sysreg(dbgbvr0_el1);
		wvr = read_sysreg(dbgwvr0_el1);

		/* Userspace disables Single Step when the end is nigh. */
		asm volatile("iter_ss_end:\n");

		GUEST_ASSERT_EQ(bvr, w_bvr);
		GUEST_ASSERT_EQ(wvr, w_wvr);
	}
	GUEST_DONE();
}
416
417static int debug_version(u64 id_aa64dfr0)
418{
419 return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0);
420}
421
422static void test_guest_debug_exceptions(u8 bpn, u8 wpn, u8 ctx_bpn)
423{
424 struct kvm_vcpu *vcpu;
425 struct kvm_vm *vm;
426 struct ucall uc;
427
428 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
429
430 vm_init_descriptor_tables(vm);
431 vcpu_init_descriptor_tables(vcpu);
432
433 vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
434 ESR_ELx_EC_BRK64, guest_sw_bp_handler);
435 vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
436 ESR_ELx_EC_BREAKPT_CUR, guest_hw_bp_handler);
437 vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
438 ESR_ELx_EC_WATCHPT_CUR, guest_wp_handler);
439 vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
440 ESR_ELx_EC_SOFTSTP_CUR, guest_ss_handler);
441 vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
442 ESR_ELx_EC_SVC64, guest_svc_handler);
443
444 /* Specify bpn/wpn/ctx_bpn to be tested */
445 vcpu_args_set(vcpu, 3, bpn, wpn, ctx_bpn);
446 pr_debug("Use bpn#%d, wpn#%d and ctx_bpn#%d\n", bpn, wpn, ctx_bpn);
447
448 vcpu_run(vcpu);
449 switch (get_ucall(vcpu, &uc)) {
450 case UCALL_ABORT:
451 REPORT_GUEST_ASSERT(uc);
452 break;
453 case UCALL_DONE:
454 goto done;
455 default:
456 TEST_FAIL("Unknown ucall %lu", uc.cmd);
457 }
458
459done:
460 kvm_vm_free(vm);
461}
462
/*
 * Drive single-step of guest_code_ss() from userspace using
 * KVM_GUESTDBG_SINGLESTEP: enable stepping on each bare ucall from the
 * guest, then on every KVM_EXIT_DEBUG verify the PC advances by exactly
 * one instruction (4 bytes) inside the iter_ss_begin..iter_ss_end
 * window, and disable stepping just before iter_ss_end.
 */
void test_single_step_from_userspace(int test_cnt)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	struct kvm_run *run;
	u64 pc, cmd;
	u64 test_pc = 0;	/* expected PC of the next debug exit; 0 = don't check */
	bool ss_enable = false;
	struct kvm_guest_debug debug = {};

	vm = vm_create_with_one_vcpu(&vcpu, guest_code_ss);
	run = vcpu->run;
	vcpu_args_set(vcpu, 1, test_cnt);

	while (1) {
		vcpu_run(vcpu);
		if (run->exit_reason != KVM_EXIT_DEBUG) {
			cmd = get_ucall(vcpu, &uc);
			if (cmd == UCALL_ABORT) {
				REPORT_GUEST_ASSERT(uc);
				/* NOT REACHED */
			} else if (cmd == UCALL_DONE) {
				break;
			}

			TEST_ASSERT(cmd == UCALL_NONE,
				    "Unexpected ucall cmd 0x%lx", cmd);

			/* Bare ucall from the guest: turn single-step on. */
			debug.control = KVM_GUESTDBG_ENABLE |
					KVM_GUESTDBG_SINGLESTEP;
			ss_enable = true;
			vcpu_guest_debug_set(vcpu, &debug);
			continue;
		}

		TEST_ASSERT(ss_enable, "Unexpected KVM_EXIT_DEBUG");

		/* Check if the current pc is expected. */
		pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
		TEST_ASSERT(!test_pc || pc == test_pc,
			    "Unexpected pc 0x%lx (expected 0x%lx)",
			    pc, test_pc);

		/* One instruction before iter_ss_end: stop stepping. */
		if ((pc + 4) == (u64)&iter_ss_end) {
			test_pc = 0;
			debug.control = KVM_GUESTDBG_ENABLE;
			ss_enable = false;
			vcpu_guest_debug_set(vcpu, &debug);
			continue;
		}

		/*
		 * If the current pc is between iter_ss_begin and
		 * iter_ss_end, the pc for the next KVM_EXIT_DEBUG should
		 * be the current pc + 4.
		 */
		if ((pc >= (u64)&iter_ss_begin) &&
		    (pc < (u64)&iter_ss_end))
			test_pc = pc + 4;
		else
			test_pc = 0;
	}

	kvm_vm_free(vm);
}
529
530/*
531 * Run debug testing using the various breakpoint#, watchpoint# and
532 * context-aware breakpoint# with the given ID_AA64DFR0_EL1 configuration.
533 */
534void test_guest_debug_exceptions_all(u64 aa64dfr0)
535{
536 u8 brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base;
537 int b, w, c;
538
539 /* Number of breakpoints */
540 brp_num = FIELD_GET(ID_AA64DFR0_EL1_BRPs, aa64dfr0) + 1;
541 __TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");
542
543 /* Number of watchpoints */
544 wrp_num = FIELD_GET(ID_AA64DFR0_EL1_WRPs, aa64dfr0) + 1;
545
546 /* Number of context aware breakpoints */
547 ctx_brp_num = FIELD_GET(ID_AA64DFR0_EL1_CTX_CMPs, aa64dfr0) + 1;
548
549 pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
550 brp_num, wrp_num, ctx_brp_num);
551
552 /* Number of normal (non-context aware) breakpoints */
553 normal_brp_num = brp_num - ctx_brp_num;
554
555 /* Lowest context aware breakpoint number */
556 ctx_brp_base = normal_brp_num;
557
558 /* Run tests with all supported breakpoints/watchpoints */
559 for (c = ctx_brp_base; c < ctx_brp_base + ctx_brp_num; c++) {
560 for (b = 0; b < normal_brp_num; b++) {
561 for (w = 0; w < wrp_num; w++)
562 test_guest_debug_exceptions(b, w, c);
563 }
564 }
565}
566
/* Print usage (blank-line padded, same bytes as the puts/printf trio) and exit. */
static void help(char *name)
{
	printf("\nUsage: %s [-h] [-i iterations of the single step test]\n\n",
	       name);
	exit(0);
}
574
575int main(int argc, char *argv[])
576{
577 struct kvm_vcpu *vcpu;
578 struct kvm_vm *vm;
579 int opt;
580 int ss_iteration = 10000;
581 u64 aa64dfr0;
582
583 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
584 aa64dfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
585 __TEST_REQUIRE(debug_version(aa64dfr0) >= 6,
586 "Armv8 debug architecture not supported.");
587 kvm_vm_free(vm);
588
589 while ((opt = getopt(argc, argv, "i:")) != -1) {
590 switch (opt) {
591 case 'i':
592 ss_iteration = atoi_positive("Number of iterations", optarg);
593 break;
594 case 'h':
595 default:
596 help(argv[0]);
597 break;
598 }
599 }
600
601 test_guest_debug_exceptions_all(aa64dfr0);
602 test_single_step_from_userspace(ss_iteration);
603
604 return 0;
605}