Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'timers_urgent_for_v6.7_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fix from Borislav Petkov:

- Push pending hrtimers away from a CPU that is being offlined at an
  earlier point in the offlining process, in order to prevent a
  deadlock

* tag 'timers_urgent_for_v6.7_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
hrtimers: Push pending hrtimers away from outgoing CPU earlier

+22 -24
+1
include/linux/cpuhotplug.h
··· 195 195 CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, 196 196 CPUHP_AP_ARM64_ISNDEP_STARTING, 197 197 CPUHP_AP_SMPCFD_DYING, 198 + CPUHP_AP_HRTIMERS_DYING, 198 199 CPUHP_AP_X86_TBOOT_DYING, 199 200 CPUHP_AP_ARM_CACHE_B15_RAC_DYING, 200 201 CPUHP_AP_ONLINE,
+2 -2
include/linux/hrtimer.h
··· 531 531 532 532 int hrtimers_prepare_cpu(unsigned int cpu); 533 533 #ifdef CONFIG_HOTPLUG_CPU 534 - int hrtimers_dead_cpu(unsigned int cpu); 534 + int hrtimers_cpu_dying(unsigned int cpu); 535 535 #else 536 - #define hrtimers_dead_cpu NULL 536 + #define hrtimers_cpu_dying NULL 537 537 #endif 538 538 539 539 #endif
+7 -1
kernel/cpu.c
··· 2113 2113 [CPUHP_HRTIMERS_PREPARE] = { 2114 2114 .name = "hrtimers:prepare", 2115 2115 .startup.single = hrtimers_prepare_cpu, 2116 - .teardown.single = hrtimers_dead_cpu, 2116 + .teardown.single = NULL, 2117 2117 }, 2118 2118 [CPUHP_SMPCFD_PREPARE] = { 2119 2119 .name = "smpcfd:prepare", ··· 2205 2205 .startup.single = NULL, 2206 2206 .teardown.single = smpcfd_dying_cpu, 2207 2207 }, 2208 + [CPUHP_AP_HRTIMERS_DYING] = { 2209 + .name = "hrtimers:dying", 2210 + .startup.single = NULL, 2211 + .teardown.single = hrtimers_cpu_dying, 2212 + }, 2213 + 2208 2214 /* Entry state on starting. Interrupts enabled from here on. Transient 2209 2215 * state for synchronsization */ 2210 2216 [CPUHP_AP_ONLINE] = {
+12 -21
kernel/time/hrtimer.c
··· 2219 2219 } 2220 2220 } 2221 2221 2222 - int hrtimers_dead_cpu(unsigned int scpu) 2222 + int hrtimers_cpu_dying(unsigned int dying_cpu) 2223 2223 { 2224 2224 struct hrtimer_cpu_base *old_base, *new_base; 2225 - int i; 2225 + int i, ncpu = cpumask_first(cpu_active_mask); 2226 2226 2227 - BUG_ON(cpu_online(scpu)); 2228 - tick_cancel_sched_timer(scpu); 2227 + tick_cancel_sched_timer(dying_cpu); 2229 2228 2230 - /* 2231 - * this BH disable ensures that raise_softirq_irqoff() does 2232 - * not wakeup ksoftirqd (and acquire the pi-lock) while 2233 - * holding the cpu_base lock 2234 - */ 2235 - local_bh_disable(); 2236 - local_irq_disable(); 2237 - old_base = &per_cpu(hrtimer_bases, scpu); 2238 - new_base = this_cpu_ptr(&hrtimer_bases); 2229 + old_base = this_cpu_ptr(&hrtimer_bases); 2230 + new_base = &per_cpu(hrtimer_bases, ncpu); 2231 + 2239 2232 /* 2240 2233 * The caller is globally serialized and nobody else 2241 2234 * takes two locks at once, deadlock is not possible. 2242 2235 */ 2243 - raw_spin_lock(&new_base->lock); 2244 - raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 2236 + raw_spin_lock(&old_base->lock); 2237 + raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING); 2245 2238 2246 2239 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { 2247 2240 migrate_hrtimer_list(&old_base->clock_base[i], ··· 2245 2252 * The migration might have changed the first expiring softirq 2246 2253 * timer on this CPU. Update it. 
2247 2254 */ 2248 - hrtimer_update_softirq_timer(new_base, false); 2255 + __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT); 2256 + /* Tell the other CPU to retrigger the next event */ 2257 + smp_call_function_single(ncpu, retrigger_next_event, NULL, 0); 2249 2258 2250 - raw_spin_unlock(&old_base->lock); 2251 2259 raw_spin_unlock(&new_base->lock); 2260 + raw_spin_unlock(&old_base->lock); 2252 2261 2253 - /* Check, if we got expired work to do */ 2254 - __hrtimer_peek_ahead_timers(); 2255 - local_irq_enable(); 2256 - local_bh_enable(); 2257 2262 return 0; 2258 2263 } 2259 2264