Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86-urgent-2020-09-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"Two fixes for the x86 interrupt code:

- Unbreak the magic 'search the timer interrupt' logic in IO/APIC
code which got wrecked when the core interrupt code made the
state tracking logic stricter.

That caused the interrupt line to stay masked after switching from
IO/APIC to PIC delivery mode, which obviously prevents interrupts
from being delivered.

- Make run_on_irqstack_cond() typesafe. The function argument is a
void pointer which is then cast to 'void (*fun)(void *)'.

This breaks Control Flow Integrity checking in clang. Use proper
helper functions for the three variants required"

* tag 'x86-urgent-2020-09-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/ioapic: Unbreak check_timer()
x86/irq: Make run_on_irqstack_cond() typesafe

+69 -13
+1 -1
arch/x86/entry/common.c
··· 299 299 old_regs = set_irq_regs(regs); 300 300 301 301 instrumentation_begin(); 302 - run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs); 302 + run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs); 303 303 instrumentation_end(); 304 304 305 305 set_irq_regs(old_regs);
+2
arch/x86/entry/entry_64.S
··· 682 682 * rdx: Function argument (can be NULL if none) 683 683 */ 684 684 SYM_FUNC_START(asm_call_on_stack) 685 + SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL) 686 + SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL) 685 687 /* 686 688 * Save the frame pointer unconditionally. This allows the ORC 687 689 * unwinder to handle the stack switch.
+1 -1
arch/x86/include/asm/idtentry.h
··· 242 242 instrumentation_begin(); \ 243 243 irq_enter_rcu(); \ 244 244 kvm_set_cpu_l1tf_flush_l1d(); \ 245 - run_on_irqstack_cond(__##func, regs, regs); \ 245 + run_sysvec_on_irqstack_cond(__##func, regs); \ 246 246 irq_exit_rcu(); \ 247 247 instrumentation_end(); \ 248 248 irqentry_exit(regs, state); \
+62 -9
arch/x86/include/asm/irq_stack.h
··· 12 12 return __this_cpu_read(irq_count) != -1; 13 13 } 14 14 15 - void asm_call_on_stack(void *sp, void *func, void *arg); 15 + void asm_call_on_stack(void *sp, void (*func)(void), void *arg); 16 + void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs), 17 + struct pt_regs *regs); 18 + void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc), 19 + struct irq_desc *desc); 16 20 17 - static __always_inline void __run_on_irqstack(void *func, void *arg) 21 + static __always_inline void __run_on_irqstack(void (*func)(void)) 18 22 { 19 23 void *tos = __this_cpu_read(hardirq_stack_ptr); 20 24 21 25 __this_cpu_add(irq_count, 1); 22 - asm_call_on_stack(tos - 8, func, arg); 26 + asm_call_on_stack(tos - 8, func, NULL); 27 + __this_cpu_sub(irq_count, 1); 28 + } 29 + 30 + static __always_inline void 31 + __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs), 32 + struct pt_regs *regs) 33 + { 34 + void *tos = __this_cpu_read(hardirq_stack_ptr); 35 + 36 + __this_cpu_add(irq_count, 1); 37 + asm_call_sysvec_on_stack(tos - 8, func, regs); 38 + __this_cpu_sub(irq_count, 1); 39 + } 40 + 41 + static __always_inline void 42 + __run_irq_on_irqstack(void (*func)(struct irq_desc *desc), 43 + struct irq_desc *desc) 44 + { 45 + void *tos = __this_cpu_read(hardirq_stack_ptr); 46 + 47 + __this_cpu_add(irq_count, 1); 48 + asm_call_irq_on_stack(tos - 8, func, desc); 23 49 __this_cpu_sub(irq_count, 1); 24 50 } 25 51 26 52 #else /* CONFIG_X86_64 */ 27 53 static inline bool irqstack_active(void) { return false; } 28 - static inline void __run_on_irqstack(void *func, void *arg) { } 54 + static inline void __run_on_irqstack(void (*func)(void)) { } 55 + static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs), 56 + struct pt_regs *regs) { } 57 + static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc), 58 + struct irq_desc *desc) { } 29 59 #endif /* !CONFIG_X86_64 */ 30 60 31 61 static __always_inline bool 
irq_needs_irq_stack(struct pt_regs *regs) ··· 67 37 return !user_mode(regs) && !irqstack_active(); 68 38 } 69 39 70 - static __always_inline void run_on_irqstack_cond(void *func, void *arg, 40 + 41 + static __always_inline void run_on_irqstack_cond(void (*func)(void), 71 42 struct pt_regs *regs) 72 43 { 73 - void (*__func)(void *arg) = func; 74 - 75 44 lockdep_assert_irqs_disabled(); 76 45 77 46 if (irq_needs_irq_stack(regs)) 78 - __run_on_irqstack(__func, arg); 47 + __run_on_irqstack(func); 79 48 else 80 - __func(arg); 49 + func(); 50 + } 51 + 52 + static __always_inline void 53 + run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs), 54 + struct pt_regs *regs) 55 + { 56 + lockdep_assert_irqs_disabled(); 57 + 58 + if (irq_needs_irq_stack(regs)) 59 + __run_sysvec_on_irqstack(func, regs); 60 + else 61 + func(regs); 62 + } 63 + 64 + static __always_inline void 65 + run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc, 66 + struct pt_regs *regs) 67 + { 68 + lockdep_assert_irqs_disabled(); 69 + 70 + if (irq_needs_irq_stack(regs)) 71 + __run_irq_on_irqstack(func, desc); 72 + else 73 + func(desc); 81 74 } 82 75 83 76 #endif
+1
arch/x86/kernel/apic/io_apic.c
··· 2243 2243 legacy_pic->init(0); 2244 2244 legacy_pic->make_irq(0); 2245 2245 apic_write(APIC_LVT0, APIC_DM_EXTINT); 2246 + legacy_pic->unmask(0); 2246 2247 2247 2248 unlock_ExtINT_logic(); 2248 2249
+1 -1
arch/x86/kernel/irq.c
··· 227 227 struct pt_regs *regs) 228 228 { 229 229 if (IS_ENABLED(CONFIG_X86_64)) 230 - run_on_irqstack_cond(desc->handle_irq, desc, regs); 230 + run_irq_on_irqstack_cond(desc->handle_irq, desc, regs); 231 231 else 232 232 __handle_irq(desc, regs); 233 233 }
+1 -1
arch/x86/kernel/irq_64.c
··· 74 74 75 75 void do_softirq_own_stack(void) 76 76 { 77 - run_on_irqstack_cond(__do_softirq, NULL, NULL); 77 + run_on_irqstack_cond(__do_softirq, NULL); 78 78 }