Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86-urgent-2020-05-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"A set of fixes for x86:

- Ensure that direct mapping alias is always flushed when changing
page attributes. The optimization for small ranges failed to do so
when the virtual address was in the vmalloc or module space.

- Unbreak the trace event registration for syscalls without arguments
caused by the refactoring of the SYSCALL_DEFINE0() macro.

- Move the printk in the TSC deadline timer code to a place where it
is guaranteed to only be called once during boot and cannot be
rearmed by clearing warn_once after boot. If it's invoked post boot
then lockdep rightfully complains about a potential deadlock as the
calling context is different.

- A series of fixes for objtool and the ORC unwinder addressing a
variety of small issues:

- Stack offset tracking for indirect CFAs in objtool ignored
subsequent pushes and pops

- Repair the unwind hints in the register clearing entry ASM code

- Make the unwinding in the low level exit to usermode code stop
after switching to the trampoline stack. The unwind hint is no
longer valid and the ORC unwinder emits a warning as it can't
find the registers anymore.

- Fix unwind hints in switch_to_asm() and rewind_stack_do_exit()
which caused objtool to generate bogus ORC data.

- Prevent unwinder warnings when dumping the stack of a
non-current task as there is no way to be sure about the
validity because the dumped stack can be a moving target.

- Make the ORC unwinder behave the same way as the frame pointer
unwinder when dumping an inactive task's stack and do not skip
the first frame.

- Prevent ORC unwinding before ORC data has been initialized

- Immediately terminate unwinding when an unknown ORC entry type
is found.

- Prevent premature stop of the unwinder caused by IRET frames.

- Fix another infinite loop in objtool caused by a negative
offset which was not caught.

- Address a few build warnings in the ORC unwinder and add
missing static/ro_after_init annotations"

* tag 'x86-urgent-2020-05-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/unwind/orc: Move ORC sorting variables under !CONFIG_MODULES
x86/apic: Move TSC deadline timer debug printk
ftrace/x86: Fix trace event registration for syscalls without arguments
x86/mm/cpa: Flush direct map alias during cpa
objtool: Fix infinite loop in for_offset_range()
x86/unwind/orc: Fix premature unwind stoppage due to IRET frames
x86/unwind/orc: Fix error path for bad ORC entry type
x86/unwind/orc: Prevent unwinding before ORC initialization
x86/unwind/orc: Don't skip the first frame for inactive tasks
x86/unwind: Prevent false warnings for non-current tasks
x86/unwind/orc: Convert global variables to static
x86/entry/64: Fix unwind hints in rewind_stack_do_exit()
x86/entry/64: Fix unwind hints in __switch_to_asm()
x86/entry/64: Fix unwind hints in kernel exit path
x86/entry/64: Fix unwind hints in register clearing code
objtool: Fix stack offset tracking for indirect CFAs

+138 -90
+21 -19
arch/x86/entry/calling.h
··· 98 98 #define SIZEOF_PTREGS 21*8 99 99 100 100 .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0 101 - /* 102 - * Push registers and sanitize registers of values that a 103 - * speculation attack might otherwise want to exploit. The 104 - * lower registers are likely clobbered well before they 105 - * could be put to use in a speculative execution gadget. 106 - * Interleave XOR with PUSH for better uop scheduling: 107 - */ 108 101 .if \save_ret 109 102 pushq %rsi /* pt_regs->si */ 110 103 movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ ··· 107 114 pushq %rsi /* pt_regs->si */ 108 115 .endif 109 116 pushq \rdx /* pt_regs->dx */ 110 - xorl %edx, %edx /* nospec dx */ 111 117 pushq %rcx /* pt_regs->cx */ 112 - xorl %ecx, %ecx /* nospec cx */ 113 118 pushq \rax /* pt_regs->ax */ 114 119 pushq %r8 /* pt_regs->r8 */ 115 - xorl %r8d, %r8d /* nospec r8 */ 116 120 pushq %r9 /* pt_regs->r9 */ 117 - xorl %r9d, %r9d /* nospec r9 */ 118 121 pushq %r10 /* pt_regs->r10 */ 119 - xorl %r10d, %r10d /* nospec r10 */ 120 122 pushq %r11 /* pt_regs->r11 */ 121 - xorl %r11d, %r11d /* nospec r11*/ 122 123 pushq %rbx /* pt_regs->rbx */ 123 - xorl %ebx, %ebx /* nospec rbx*/ 124 124 pushq %rbp /* pt_regs->rbp */ 125 - xorl %ebp, %ebp /* nospec rbp*/ 126 125 pushq %r12 /* pt_regs->r12 */ 127 - xorl %r12d, %r12d /* nospec r12*/ 128 126 pushq %r13 /* pt_regs->r13 */ 129 - xorl %r13d, %r13d /* nospec r13*/ 130 127 pushq %r14 /* pt_regs->r14 */ 131 - xorl %r14d, %r14d /* nospec r14*/ 132 128 pushq %r15 /* pt_regs->r15 */ 133 - xorl %r15d, %r15d /* nospec r15*/ 134 129 UNWIND_HINT_REGS 130 + 135 131 .if \save_ret 136 132 pushq %rsi /* return address on top of stack */ 137 133 .endif 134 + 135 + /* 136 + * Sanitize registers of values that a speculation attack might 137 + * otherwise want to exploit. The lower registers are likely clobbered 138 + * well before they could be put to use in a speculative execution 139 + * gadget. 
140 + */ 141 + xorl %edx, %edx /* nospec dx */ 142 + xorl %ecx, %ecx /* nospec cx */ 143 + xorl %r8d, %r8d /* nospec r8 */ 144 + xorl %r9d, %r9d /* nospec r9 */ 145 + xorl %r10d, %r10d /* nospec r10 */ 146 + xorl %r11d, %r11d /* nospec r11 */ 147 + xorl %ebx, %ebx /* nospec rbx */ 148 + xorl %ebp, %ebp /* nospec rbp */ 149 + xorl %r12d, %r12d /* nospec r12 */ 150 + xorl %r13d, %r13d /* nospec r13 */ 151 + xorl %r14d, %r14d /* nospec r14 */ 152 + xorl %r15d, %r15d /* nospec r15 */ 153 + 138 154 .endm 139 155 140 156 .macro POP_REGS pop_rdi=1 skip_r11rcx=0
+7 -7
arch/x86/entry/entry_64.S
··· 249 249 */ 250 250 syscall_return_via_sysret: 251 251 /* rcx and r11 are already restored (see code above) */ 252 - UNWIND_HINT_EMPTY 253 252 POP_REGS pop_rdi=0 skip_r11rcx=1 254 253 255 254 /* ··· 257 258 */ 258 259 movq %rsp, %rdi 259 260 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp 261 + UNWIND_HINT_EMPTY 260 262 261 263 pushq RSP-RDI(%rdi) /* RSP */ 262 264 pushq (%rdi) /* RDI */ ··· 279 279 * %rdi: prev task 280 280 * %rsi: next task 281 281 */ 282 - SYM_CODE_START(__switch_to_asm) 283 - UNWIND_HINT_FUNC 282 + SYM_FUNC_START(__switch_to_asm) 284 283 /* 285 284 * Save callee-saved registers 286 285 * This must match the order in inactive_task_frame ··· 320 321 popq %rbp 321 322 322 323 jmp __switch_to 323 - SYM_CODE_END(__switch_to_asm) 324 + SYM_FUNC_END(__switch_to_asm) 324 325 325 326 /* 326 327 * A newly forked process directly context switches into this address. ··· 511 512 * +----------------------------------------------------+ 512 513 */ 513 514 SYM_CODE_START(interrupt_entry) 514 - UNWIND_HINT_FUNC 515 + UNWIND_HINT_IRET_REGS offset=16 515 516 ASM_CLAC 516 517 cld 517 518 ··· 543 544 pushq 5*8(%rdi) /* regs->eflags */ 544 545 pushq 4*8(%rdi) /* regs->cs */ 545 546 pushq 3*8(%rdi) /* regs->ip */ 547 + UNWIND_HINT_IRET_REGS 546 548 pushq 2*8(%rdi) /* regs->orig_ax */ 547 549 pushq 8(%rdi) /* return address */ 548 - UNWIND_HINT_FUNC 549 550 550 551 movq (%rdi), %rdi 551 552 jmp 2f ··· 636 637 */ 637 638 movq %rsp, %rdi 638 639 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp 640 + UNWIND_HINT_EMPTY 639 641 640 642 /* Copy the IRET frame to the trampoline stack. */ 641 643 pushq 6*8(%rdi) /* SS */ ··· 1739 1739 1740 1740 movq PER_CPU_VAR(cpu_current_top_of_stack), %rax 1741 1741 leaq -PTREGS_SIZE(%rax), %rsp 1742 - UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE 1742 + UNWIND_HINT_REGS 1743 1743 1744 1744 call do_exit 1745 1745 SYM_CODE_END(rewind_stack_do_exit)
+3 -2
arch/x86/include/asm/ftrace.h
··· 61 61 { 62 62 /* 63 63 * Compare the symbol name with the system call name. Skip the 64 - * "__x64_sys", "__ia32_sys" or simple "sys" prefix. 64 + * "__x64_sys", "__ia32_sys", "__do_sys" or simple "sys" prefix. 65 65 */ 66 66 return !strcmp(sym + 3, name + 3) || 67 67 (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) || 68 - (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3)); 68 + (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3)) || 69 + (!strncmp(sym, "__do_sys", 8) && !strcmp(sym + 8, name + 3)); 69 70 } 70 71 71 72 #ifndef COMPILE_OFFSETS
+1 -1
arch/x86/include/asm/unwind.h
··· 19 19 #if defined(CONFIG_UNWINDER_ORC) 20 20 bool signal, full_regs; 21 21 unsigned long sp, bp, ip; 22 - struct pt_regs *regs; 22 + struct pt_regs *regs, *prev_regs; 23 23 #elif defined(CONFIG_UNWINDER_FRAME_POINTER) 24 24 bool got_irq; 25 25 unsigned long *bp, *orig_sp, ip;
+14 -13
arch/x86/kernel/apic/apic.c
··· 352 352 * According to Intel, MFENCE can do the serialization here. 353 353 */ 354 354 asm volatile("mfence" : : : "memory"); 355 - 356 - printk_once(KERN_DEBUG "TSC deadline timer enabled\n"); 357 355 return; 358 356 } 359 357 ··· 544 546 }; 545 547 static DEFINE_PER_CPU(struct clock_event_device, lapic_events); 546 548 547 - static u32 hsx_deadline_rev(void) 549 + static __init u32 hsx_deadline_rev(void) 548 550 { 549 551 switch (boot_cpu_data.x86_stepping) { 550 552 case 0x02: return 0x3a; /* EP */ ··· 554 556 return ~0U; 555 557 } 556 558 557 - static u32 bdx_deadline_rev(void) 559 + static __init u32 bdx_deadline_rev(void) 558 560 { 559 561 switch (boot_cpu_data.x86_stepping) { 560 562 case 0x02: return 0x00000011; ··· 566 568 return ~0U; 567 569 } 568 570 569 - static u32 skx_deadline_rev(void) 571 + static __init u32 skx_deadline_rev(void) 570 572 { 571 573 switch (boot_cpu_data.x86_stepping) { 572 574 case 0x03: return 0x01000136; ··· 579 581 return ~0U; 580 582 } 581 583 582 - static const struct x86_cpu_id deadline_match[] = { 584 + static const struct x86_cpu_id deadline_match[] __initconst = { 583 585 X86_MATCH_INTEL_FAM6_MODEL( HASWELL_X, &hsx_deadline_rev), 584 586 X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X, 0x0b000020), 585 587 X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_D, &bdx_deadline_rev), ··· 601 603 {}, 602 604 }; 603 605 604 - static void apic_check_deadline_errata(void) 606 + static __init bool apic_validate_deadline_timer(void) 605 607 { 606 608 const struct x86_cpu_id *m; 607 609 u32 rev; 608 610 609 - if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) || 610 - boot_cpu_has(X86_FEATURE_HYPERVISOR)) 611 - return; 611 + if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) 612 + return false; 613 + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 614 + return true; 612 615 613 616 m = x86_match_cpu(deadline_match); 614 617 if (!m) 615 - return; 618 + return true; 616 619 617 620 /* 618 621 * Function pointers will have the MSB set due to address layout, ··· 
625 626 rev = (u32)m->driver_data; 626 627 627 628 if (boot_cpu_data.microcode >= rev) 628 - return; 629 + return true; 629 630 630 631 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); 631 632 pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; " 632 633 "please update microcode to version: 0x%x (or later)\n", rev); 634 + return false; 633 635 } 634 636 635 637 /* ··· 2092 2092 { 2093 2093 unsigned int new_apicid; 2094 2094 2095 - apic_check_deadline_errata(); 2095 + if (apic_validate_deadline_timer()) 2096 + pr_debug("TSC deadline timer available\n"); 2096 2097 2097 2098 if (x2apic_mode) { 2098 2099 boot_cpu_physical_apicid = read_apic_id();
+2 -1
arch/x86/kernel/dumpstack_64.c
··· 183 183 */ 184 184 if (visit_mask) { 185 185 if (*visit_mask & (1UL << info->type)) { 186 - printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type); 186 + if (task == current) 187 + printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type); 187 188 goto unknown; 188 189 } 189 190 *visit_mask |= 1UL << info->type;
+3
arch/x86/kernel/unwind_frame.c
··· 344 344 if (IS_ENABLED(CONFIG_X86_32)) 345 345 goto the_end; 346 346 347 + if (state->task != current) 348 + goto the_end; 349 + 347 350 if (state->regs) { 348 351 printk_deferred_once(KERN_WARNING 349 352 "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
+74 -39
arch/x86/kernel/unwind_orc.c
··· 8 8 #include <asm/orc_lookup.h> 9 9 10 10 #define orc_warn(fmt, ...) \ 11 - printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__) 11 + printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__) 12 + 13 + #define orc_warn_current(args...) \ 14 + ({ \ 15 + if (state->task == current) \ 16 + orc_warn(args); \ 17 + }) 12 18 13 19 extern int __start_orc_unwind_ip[]; 14 20 extern int __stop_orc_unwind_ip[]; 15 21 extern struct orc_entry __start_orc_unwind[]; 16 22 extern struct orc_entry __stop_orc_unwind[]; 17 23 18 - static DEFINE_MUTEX(sort_mutex); 19 - int *cur_orc_ip_table = __start_orc_unwind_ip; 20 - struct orc_entry *cur_orc_table = __start_orc_unwind; 21 - 22 - unsigned int lookup_num_blocks; 23 - bool orc_init; 24 + static bool orc_init __ro_after_init; 25 + static unsigned int lookup_num_blocks __ro_after_init; 24 26 25 27 static inline unsigned long orc_ip(const int *ip) 26 28 { ··· 144 142 { 145 143 static struct orc_entry *orc; 146 144 147 - if (!orc_init) 148 - return NULL; 149 - 150 145 if (ip == 0) 151 146 return &null_orc_entry; 152 147 ··· 187 188 } 188 189 189 190 #ifdef CONFIG_MODULES 191 + 192 + static DEFINE_MUTEX(sort_mutex); 193 + static int *cur_orc_ip_table = __start_orc_unwind_ip; 194 + static struct orc_entry *cur_orc_table = __start_orc_unwind; 190 195 191 196 static void orc_sort_swap(void *_a, void *_b, int size) 192 197 { ··· 384 381 return true; 385 382 } 386 383 384 + /* 385 + * If state->regs is non-NULL, and points to a full pt_regs, just get the reg 386 + * value from state->regs. 387 + * 388 + * Otherwise, if state->regs just points to IRET regs, and the previous frame 389 + * had full regs, it's safe to get the value from the previous regs. This can 390 + * happen when early/late IRQ entry code gets interrupted by an NMI. 
391 + */ 392 + static bool get_reg(struct unwind_state *state, unsigned int reg_off, 393 + unsigned long *val) 394 + { 395 + unsigned int reg = reg_off/8; 396 + 397 + if (!state->regs) 398 + return false; 399 + 400 + if (state->full_regs) { 401 + *val = ((unsigned long *)state->regs)[reg]; 402 + return true; 403 + } 404 + 405 + if (state->prev_regs) { 406 + *val = ((unsigned long *)state->prev_regs)[reg]; 407 + return true; 408 + } 409 + 410 + return false; 411 + } 412 + 387 413 bool unwind_next_frame(struct unwind_state *state) 388 414 { 389 - unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp; 415 + unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp; 390 416 enum stack_type prev_type = state->stack_info.type; 391 417 struct orc_entry *orc; 392 418 bool indirect = false; ··· 477 445 break; 478 446 479 447 case ORC_REG_R10: 480 - if (!state->regs || !state->full_regs) { 481 - orc_warn("missing regs for base reg R10 at ip %pB\n", 482 - (void *)state->ip); 448 + if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) { 449 + orc_warn_current("missing R10 value at %pB\n", 450 + (void *)state->ip); 483 451 goto err; 484 452 } 485 - sp = state->regs->r10; 486 453 break; 487 454 488 455 case ORC_REG_R13: 489 - if (!state->regs || !state->full_regs) { 490 - orc_warn("missing regs for base reg R13 at ip %pB\n", 491 - (void *)state->ip); 456 + if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) { 457 + orc_warn_current("missing R13 value at %pB\n", 458 + (void *)state->ip); 492 459 goto err; 493 460 } 494 - sp = state->regs->r13; 495 461 break; 496 462 497 463 case ORC_REG_DI: 498 - if (!state->regs || !state->full_regs) { 499 - orc_warn("missing regs for base reg DI at ip %pB\n", 500 - (void *)state->ip); 464 + if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) { 465 + orc_warn_current("missing RDI value at %pB\n", 466 + (void *)state->ip); 501 467 goto err; 502 468 } 503 - sp = state->regs->di; 504 469 break; 505 470 506 471 
case ORC_REG_DX: 507 - if (!state->regs || !state->full_regs) { 508 - orc_warn("missing regs for base reg DX at ip %pB\n", 509 - (void *)state->ip); 472 + if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) { 473 + orc_warn_current("missing DX value at %pB\n", 474 + (void *)state->ip); 510 475 goto err; 511 476 } 512 - sp = state->regs->dx; 513 477 break; 514 478 515 479 default: 516 - orc_warn("unknown SP base reg %d for ip %pB\n", 480 + orc_warn("unknown SP base reg %d at %pB\n", 517 481 orc->sp_reg, (void *)state->ip); 518 482 goto err; 519 483 } ··· 532 504 533 505 state->sp = sp; 534 506 state->regs = NULL; 507 + state->prev_regs = NULL; 535 508 state->signal = false; 536 509 break; 537 510 538 511 case ORC_TYPE_REGS: 539 512 if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) { 540 - orc_warn("can't dereference registers at %p for ip %pB\n", 541 - (void *)sp, (void *)orig_ip); 513 + orc_warn_current("can't access registers at %pB\n", 514 + (void *)orig_ip); 542 515 goto err; 543 516 } 544 517 545 518 state->regs = (struct pt_regs *)sp; 519 + state->prev_regs = NULL; 546 520 state->full_regs = true; 547 521 state->signal = true; 548 522 break; 549 523 550 524 case ORC_TYPE_REGS_IRET: 551 525 if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) { 552 - orc_warn("can't dereference iret registers at %p for ip %pB\n", 553 - (void *)sp, (void *)orig_ip); 526 + orc_warn_current("can't access iret registers at %pB\n", 527 + (void *)orig_ip); 554 528 goto err; 555 529 } 556 530 531 + if (state->full_regs) 532 + state->prev_regs = state->regs; 557 533 state->regs = (void *)sp - IRET_FRAME_OFFSET; 558 534 state->full_regs = false; 559 535 state->signal = true; 560 536 break; 561 537 562 538 default: 563 - orc_warn("unknown .orc_unwind entry type %d for ip %pB\n", 539 + orc_warn("unknown .orc_unwind entry type %d at %pB\n", 564 540 orc->type, (void *)orig_ip); 565 - break; 541 + goto err; 566 542 } 567 543 568 544 /* Find BP: */ 569 545 switch 
(orc->bp_reg) { 570 546 case ORC_REG_UNDEFINED: 571 - if (state->regs && state->full_regs) 572 - state->bp = state->regs->bp; 547 + if (get_reg(state, offsetof(struct pt_regs, bp), &tmp)) 548 + state->bp = tmp; 573 549 break; 574 550 575 551 case ORC_REG_PREV_SP: ··· 596 564 if (state->stack_info.type == prev_type && 597 565 on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) && 598 566 state->sp <= prev_sp) { 599 - orc_warn("stack going in the wrong direction? ip=%pB\n", 600 - (void *)orig_ip); 567 + orc_warn_current("stack going in the wrong direction? at %pB\n", 568 + (void *)orig_ip); 601 569 goto err; 602 570 } 603 571 ··· 617 585 void __unwind_start(struct unwind_state *state, struct task_struct *task, 618 586 struct pt_regs *regs, unsigned long *first_frame) 619 587 { 588 + if (!orc_init) 589 + goto done; 590 + 620 591 memset(state, 0, sizeof(*state)); 621 592 state->task = task; 622 593 ··· 686 651 /* Otherwise, skip ahead to the user-specified starting frame: */ 687 652 while (!unwind_done(state) && 688 653 (!on_stack(&state->stack_info, first_frame, sizeof(long)) || 689 - state->sp <= (unsigned long)first_frame)) 654 + state->sp < (unsigned long)first_frame)) 690 655 unwind_next_frame(state); 691 656 692 657 return;
+8 -4
arch/x86/mm/pat/set_memory.c
··· 43 43 unsigned long pfn; 44 44 unsigned int flags; 45 45 unsigned int force_split : 1, 46 - force_static_prot : 1; 46 + force_static_prot : 1, 47 + force_flush_all : 1; 47 48 struct page **pages; 48 49 }; 49 50 ··· 356 355 return; 357 356 } 358 357 359 - if (cpa->numpages <= tlb_single_page_flush_ceiling) 360 - on_each_cpu(__cpa_flush_tlb, cpa, 1); 361 - else 358 + if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling) 362 359 flush_tlb_all(); 360 + else 361 + on_each_cpu(__cpa_flush_tlb, cpa, 1); 363 362 364 363 if (!cache) 365 364 return; ··· 1599 1598 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); 1600 1599 alias_cpa.curpage = 0; 1601 1600 1601 + cpa->force_flush_all = 1; 1602 + 1602 1603 ret = __change_page_attr_set_clr(&alias_cpa, 0); 1603 1604 if (ret) 1604 1605 return ret; ··· 1621 1618 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); 1622 1619 alias_cpa.curpage = 0; 1623 1620 1621 + cpa->force_flush_all = 1; 1624 1622 /* 1625 1623 * The high mapping range is imprecise, so ignore the 1626 1624 * return value.
+1 -1
tools/objtool/check.c
··· 1460 1460 struct cfi_reg *cfa = &state->cfa; 1461 1461 struct stack_op *op = &insn->stack_op; 1462 1462 1463 - if (cfa->base != CFI_SP) 1463 + if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) 1464 1464 return 0; 1465 1465 1466 1466 /* push */
+4 -3
tools/objtool/elf.h
··· 87 87 #define OFFSET_STRIDE (1UL << OFFSET_STRIDE_BITS) 88 88 #define OFFSET_STRIDE_MASK (~(OFFSET_STRIDE - 1)) 89 89 90 - #define for_offset_range(_offset, _start, _end) \ 91 - for (_offset = ((_start) & OFFSET_STRIDE_MASK); \ 92 - _offset <= ((_end) & OFFSET_STRIDE_MASK); \ 90 + #define for_offset_range(_offset, _start, _end) \ 91 + for (_offset = ((_start) & OFFSET_STRIDE_MASK); \ 92 + _offset >= ((_start) & OFFSET_STRIDE_MASK) && \ 93 + _offset <= ((_end) & OFFSET_STRIDE_MASK); \ 93 94 _offset += OFFSET_STRIDE) 94 95 95 96 static inline u32 sec_offset_hash(struct section *sec, unsigned long offset)