Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86_urgent_for_v6.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- Fix an interrupt vector setup race which leads to a non-functioning
device

- Add new Intel CPU models *and* a family: 0x12. Finally. Yippie! :-)

* tag 'x86_urgent_for_v6.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/irq: Plug vector setup race
x86/cpu: Add new Intel CPU model numbers for Wildcatlake and Novalake

+60 -20
+7 -5
arch/x86/include/asm/hw_irq.h
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -92,8 +92,6 @@
 
 extern struct irq_cfg *irq_cfg(unsigned int irq);
 extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
-extern void lock_vector_lock(void);
-extern void unlock_vector_lock(void);
 #ifdef CONFIG_SMP
 extern void vector_schedule_cleanup(struct irq_cfg *);
 extern void irq_complete_move(struct irq_cfg *cfg);
@@ -99,12 +97,18 @@
 static inline void vector_schedule_cleanup(struct irq_cfg *c) { }
 static inline void irq_complete_move(struct irq_cfg *c) { }
 #endif
-
 extern void apic_ack_edge(struct irq_data *data);
-#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+#ifdef CONFIG_X86_LOCAL_APIC
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
+#else
 static inline void lock_vector_lock(void) {}
 static inline void unlock_vector_lock(void) {}
-#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+#endif
 
 /* Statistics */
 extern atomic_t irq_err_count;
+5
arch/x86/include/asm/intel-family.h
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -150,6 +150,11 @@
 
 #define INTEL_PANTHERLAKE_L		IFM(6, 0xCC) /* Cougar Cove / Crestmont */
 
+#define INTEL_WILDCATLAKE_L		IFM(6, 0xD5)
+
+#define INTEL_NOVALAKE			IFM(18, 0x01)
+#define INTEL_NOVALAKE_L		IFM(18, 0x03)
+
 /* "Small Core" Processors (Atom/E-Core) */
 
 #define INTEL_ATOM_BONNELL		IFM(6, 0x1C) /* Diamondville, Pineview */
+48 -15
arch/x86/kernel/irq.c
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -256,26 +256,59 @@
 	__handle_irq(desc, regs);
 }
 
-static __always_inline int call_irq_handler(int vector, struct pt_regs *regs)
+static struct irq_desc *reevaluate_vector(int vector)
 {
-	struct irq_desc *desc;
-	int ret = 0;
+	struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
 
-	desc = __this_cpu_read(vector_irq[vector]);
+	if (!IS_ERR_OR_NULL(desc))
+		return desc;
+
+	if (desc == VECTOR_UNUSED)
+		pr_emerg_ratelimited("No irq handler for %d.%u\n", smp_processor_id(), vector);
+	else
+		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+	return NULL;
+}
+
+static __always_inline bool call_irq_handler(int vector, struct pt_regs *regs)
+{
+	struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
+
 	if (likely(!IS_ERR_OR_NULL(desc))) {
 		handle_irq(desc, regs);
-	} else {
-		ret = -EINVAL;
-		if (desc == VECTOR_UNUSED) {
-			pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
-					     __func__, smp_processor_id(),
-					     vector);
-		} else {
-			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
-		}
+		return true;
 	}
 
-	return ret;
+	/*
+	 * Reevaluate with vector_lock held to prevent a race against
+	 * request_irq() setting up the vector:
+	 *
+	 * CPU0				CPU1
+	 *				interrupt is raised in APIC IRR
+	 *				but not handled
+	 * free_irq()
+	 *   per_cpu(vector_irq, CPU1)[vector] = VECTOR_SHUTDOWN;
+	 *
+	 * request_irq()		common_interrupt()
+	 *				  d = this_cpu_read(vector_irq[vector]);
+	 *
+	 *   per_cpu(vector_irq, CPU1)[vector] = desc;
+	 *
+	 *				  if (d == VECTOR_SHUTDOWN)
+	 *				    this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+	 *
+	 * This requires that the same vector on the same target CPU is
+	 * handed out or that a spurious interrupt hits that CPU/vector.
+	 */
+	lock_vector_lock();
+	desc = reevaluate_vector(vector);
+	unlock_vector_lock();
+
+	if (!desc)
+		return false;
+
+	handle_irq(desc, regs);
+	return true;
 }
 
 /*
@@ -289,7 +322,7 @@
 	/* entry code tells RCU that we're not quiescent.  Check it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 
-	if (unlikely(call_irq_handler(vector, regs)))
+	if (unlikely(!call_irq_handler(vector, regs)))
 		apic_eoi();
 
 	set_irq_regs(old_regs);