Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Thomas Gleixner:
"This update from the timer department contains:

- A series of patches which address a shortcoming in the tick
broadcast code.

If the broadcast device is not available or an hrtimer emulated
broadcast device, some of the original assumptions lead to boot
failures. I rather plugged all of the corner cases instead of only
addressing the issue reported, so the change got a little larger.

Has been extensively tested on x86 and arm.

- Get rid of the last holdouts using do_posix_clock_monotonic_gettime()

- A regression fix for the imx clocksource driver

- An update to the new state callbacks mechanism for clockevents.
This is required to simplify the conversion, which will take place
in 4.3"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
tick/broadcast: Prevent NULL pointer dereference
time: Get rid of do_posix_clock_monotonic_gettime
cris: Replace do_posix_clock_monotonic_gettime()
tick/broadcast: Unbreak CONFIG_GENERIC_CLOCKEVENTS=n build
tick/broadcast: Handle spurious interrupts gracefully
tick/broadcast: Check for hrtimer broadcast active early
tick/broadcast: Return busy when IPI is pending
tick/broadcast: Return busy if periodic mode and hrtimer broadcast
tick/broadcast: Move the check for periodic mode inside state handling
tick/broadcast: Prevent deep idle if no broadcast device available
tick/broadcast: Make idle check independent from mode and config
tick/broadcast: Sanity check the shutdown of the local clock_event
tick/broadcast: Prevent hrtimer recursion
clockevents: Allow set-state callbacks to be optional
clocksource/imx: Define clocksource for mx27

+155 -74
+1 -1
arch/cris/arch-v32/drivers/sync_serial.c
··· 1464 1464 if (port->write_ts_idx == NBR_IN_DESCR) 1465 1465 port->write_ts_idx = 0; 1466 1466 idx = port->write_ts_idx++; 1467 - do_posix_clock_monotonic_gettime(&port->timestamp[idx]); 1467 + ktime_get_ts(&port->timestamp[idx]); 1468 1468 port->in_buffer_len += port->inbufchunk; 1469 1469 } 1470 1470 spin_unlock_irqrestore(&port->lock, flags);
+1
drivers/clocksource/timer-imx-gpt.c
··· 529 529 530 530 CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt); 531 531 CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt); 532 + CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt); 532 533 CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt); 533 534 CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt); 534 535 CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
+5 -2
include/linux/tick.h
··· 67 67 static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } 68 68 #endif /* BROADCAST */ 69 69 70 - #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) 70 + #ifdef CONFIG_GENERIC_CLOCKEVENTS 71 71 extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); 72 72 #else 73 - static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; } 73 + static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) 74 + { 75 + return 0; 76 + } 74 77 #endif 75 78 76 79 static inline void tick_broadcast_enable(void)
-1
include/linux/timekeeping.h
··· 145 145 } 146 146 #endif 147 147 148 - #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) 149 148 #define ktime_get_real_ts64(ts) getnstimeofday64(ts) 150 149 151 150 /*
+9 -15
kernel/time/clockevents.c
··· 120 120 /* The clockevent device is getting replaced. Shut it down. */ 121 121 122 122 case CLOCK_EVT_STATE_SHUTDOWN: 123 - return dev->set_state_shutdown(dev); 123 + if (dev->set_state_shutdown) 124 + return dev->set_state_shutdown(dev); 125 + return 0; 124 126 125 127 case CLOCK_EVT_STATE_PERIODIC: 126 128 /* Core internal bug */ 127 129 if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC)) 128 130 return -ENOSYS; 129 - return dev->set_state_periodic(dev); 131 + if (dev->set_state_periodic) 132 + return dev->set_state_periodic(dev); 133 + return 0; 130 134 131 135 case CLOCK_EVT_STATE_ONESHOT: 132 136 /* Core internal bug */ 133 137 if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) 134 138 return -ENOSYS; 135 - return dev->set_state_oneshot(dev); 139 + if (dev->set_state_oneshot) 140 + return dev->set_state_oneshot(dev); 141 + return 0; 136 142 137 143 case CLOCK_EVT_STATE_ONESHOT_STOPPED: 138 144 /* Core internal bug */ ··· 476 470 477 471 if (dev->features & CLOCK_EVT_FEAT_DUMMY) 478 472 return 0; 479 - 480 - /* New state-specific callbacks */ 481 - if (!dev->set_state_shutdown) 482 - return -EINVAL; 483 - 484 - if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && 485 - !dev->set_state_periodic) 486 - return -EINVAL; 487 - 488 - if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) && 489 - !dev->set_state_oneshot) 490 - return -EINVAL; 491 473 492 474 return 0; 493 475 }
+108 -55
kernel/time/tick-broadcast.c
··· 159 159 { 160 160 struct clock_event_device *bc = tick_broadcast_device.evtdev; 161 161 unsigned long flags; 162 - int ret; 162 + int ret = 0; 163 163 164 164 raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 165 165 ··· 221 221 * If we kept the cpu in the broadcast mask, 222 222 * tell the caller to leave the per cpu device 223 223 * in shutdown state. The periodic interrupt 224 - * is delivered by the broadcast device. 224 + * is delivered by the broadcast device, if 225 + * the broadcast device exists and is not 226 + * hrtimer based. 225 227 */ 226 - ret = cpumask_test_cpu(cpu, tick_broadcast_mask); 228 + if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER)) 229 + ret = cpumask_test_cpu(cpu, tick_broadcast_mask); 227 230 break; 228 231 default: 229 - /* Nothing to do */ 230 - ret = 0; 231 232 break; 232 233 } 233 234 } ··· 266 265 * Check, if the current cpu is in the mask 267 266 */ 268 267 if (cpumask_test_cpu(cpu, mask)) { 268 + struct clock_event_device *bc = tick_broadcast_device.evtdev; 269 + 269 270 cpumask_clear_cpu(cpu, mask); 270 - local = true; 271 + /* 272 + * We only run the local handler, if the broadcast 273 + * device is not hrtimer based. Otherwise we run into 274 + * a hrtimer recursion. 
275 + * 276 + * local timer_interrupt() 277 + * local_handler() 278 + * expire_hrtimers() 279 + * bc_handler() 280 + * local_handler() 281 + * expire_hrtimers() 282 + */ 283 + local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER); 271 284 } 272 285 273 286 if (!cpumask_empty(mask)) { ··· 316 301 bool bc_local; 317 302 318 303 raw_spin_lock(&tick_broadcast_lock); 304 + 305 + /* Handle spurious interrupts gracefully */ 306 + if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) { 307 + raw_spin_unlock(&tick_broadcast_lock); 308 + return; 309 + } 310 + 319 311 bc_local = tick_do_periodic_broadcast(); 320 312 321 313 if (clockevent_state_oneshot(dev)) { ··· 381 359 case TICK_BROADCAST_ON: 382 360 cpumask_set_cpu(cpu, tick_broadcast_on); 383 361 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) { 384 - if (tick_broadcast_device.mode == 385 - TICKDEV_MODE_PERIODIC) 362 + /* 363 + * Only shutdown the cpu local device, if: 364 + * 365 + * - the broadcast device exists 366 + * - the broadcast device is not a hrtimer based one 367 + * - the broadcast device is in periodic mode to 368 + * avoid a hickup during switch to oneshot mode 369 + */ 370 + if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) && 371 + tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) 386 372 clockevents_shutdown(dev); 387 373 } 388 374 break; ··· 409 379 break; 410 380 } 411 381 412 - if (cpumask_empty(tick_broadcast_mask)) { 413 - if (!bc_stopped) 414 - clockevents_shutdown(bc); 415 - } else if (bc_stopped) { 416 - if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) 417 - tick_broadcast_start_periodic(bc); 418 - else 419 - tick_broadcast_setup_oneshot(bc); 382 + if (bc) { 383 + if (cpumask_empty(tick_broadcast_mask)) { 384 + if (!bc_stopped) 385 + clockevents_shutdown(bc); 386 + } else if (bc_stopped) { 387 + if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) 388 + tick_broadcast_start_periodic(bc); 389 + else 390 + tick_broadcast_setup_oneshot(bc); 391 + } 420 392 } 421 393 
raw_spin_unlock(&tick_broadcast_lock); 422 394 } ··· 694 662 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); 695 663 } 696 664 697 - /** 698 - * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode 699 - * @state: The target state (enter/exit) 700 - * 701 - * The system enters/leaves a state, where affected devices might stop 702 - * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups. 703 - * 704 - * Called with interrupts disabled, so clockevents_lock is not 705 - * required here because the local clock event device cannot go away 706 - * under us. 707 - */ 708 - int tick_broadcast_oneshot_control(enum tick_broadcast_state state) 665 + int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) 709 666 { 710 667 struct clock_event_device *bc, *dev; 711 - struct tick_device *td; 712 668 int cpu, ret = 0; 713 669 ktime_t now; 714 670 715 671 /* 716 - * Periodic mode does not care about the enter/exit of power 717 - * states 672 + * If there is no broadcast device, tell the caller not to go 673 + * into deep idle. 718 674 */ 719 - if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) 720 - return 0; 675 + if (!tick_broadcast_device.evtdev) 676 + return -EBUSY; 721 677 722 - /* 723 - * We are called with preemtion disabled from the depth of the 724 - * idle code, so we can't be moved away. 725 - */ 726 - td = this_cpu_ptr(&tick_cpu_device); 727 - dev = td->evtdev; 728 - 729 - if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) 730 - return 0; 678 + dev = this_cpu_ptr(&tick_cpu_device)->evtdev; 731 679 732 680 raw_spin_lock(&tick_broadcast_lock); 733 681 bc = tick_broadcast_device.evtdev; 734 682 cpu = smp_processor_id(); 735 683 736 684 if (state == TICK_BROADCAST_ENTER) { 685 + /* 686 + * If the current CPU owns the hrtimer broadcast 687 + * mechanism, it cannot go deep idle and we do not add 688 + * the CPU to the broadcast mask. 
We don't have to go 689 + * through the EXIT path as the local timer is not 690 + * shutdown. 691 + */ 692 + ret = broadcast_needs_cpu(bc, cpu); 693 + if (ret) 694 + goto out; 695 + 696 + /* 697 + * If the broadcast device is in periodic mode, we 698 + * return. 699 + */ 700 + if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { 701 + /* If it is a hrtimer based broadcast, return busy */ 702 + if (bc->features & CLOCK_EVT_FEAT_HRTIMER) 703 + ret = -EBUSY; 704 + goto out; 705 + } 706 + 737 707 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { 738 708 WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); 709 + 710 + /* Conditionally shut down the local timer. */ 739 711 broadcast_shutdown_local(bc, dev); 712 + 740 713 /* 741 714 * We only reprogram the broadcast timer if we 742 715 * did not mark ourself in the force mask and 743 716 * if the cpu local event is earlier than the 744 717 * broadcast event. If the current CPU is in 745 718 * the force mask, then we are going to be 746 - * woken by the IPI right away. 719 + * woken by the IPI right away; we return 720 + * busy, so the CPU does not try to go deep 721 + * idle. 747 722 */ 748 - if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) && 749 - dev->next_event.tv64 < bc->next_event.tv64) 723 + if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) { 724 + ret = -EBUSY; 725 + } else if (dev->next_event.tv64 < bc->next_event.tv64) { 750 726 tick_broadcast_set_event(bc, cpu, dev->next_event); 727 + /* 728 + * In case of hrtimer broadcasts the 729 + * programming might have moved the 730 + * timer to this cpu. If yes, remove 731 + * us from the broadcast mask and 732 + * return busy. 
733 + */ 734 + ret = broadcast_needs_cpu(bc, cpu); 735 + if (ret) { 736 + cpumask_clear_cpu(cpu, 737 + tick_broadcast_oneshot_mask); 738 + } 739 + } 751 740 } 752 - /* 753 - * If the current CPU owns the hrtimer broadcast 754 - * mechanism, it cannot go deep idle and we remove the 755 - * CPU from the broadcast mask. We don't have to go 756 - * through the EXIT path as the local timer is not 757 - * shutdown. 758 - */ 759 - ret = broadcast_needs_cpu(bc, cpu); 760 - if (ret) 761 - cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); 762 741 } else { 763 742 if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { 764 743 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); ··· 981 938 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false; 982 939 } 983 940 941 + #else 942 + int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) 943 + { 944 + struct clock_event_device *bc = tick_broadcast_device.evtdev; 945 + 946 + if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER)) 947 + return -EBUSY; 948 + 949 + return 0; 950 + } 984 951 #endif 985 952 986 953 void __init tick_broadcast_init(void)
+21
kernel/time/tick-common.c
··· 343 343 tick_install_broadcast_device(newdev); 344 344 } 345 345 346 + /** 347 + * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode 348 + * @state: The target state (enter/exit) 349 + * 350 + * The system enters/leaves a state, where affected devices might stop 351 + * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups. 352 + * 353 + * Called with interrupts disabled, so clockevents_lock is not 354 + * required here because the local clock event device cannot go away 355 + * under us. 356 + */ 357 + int tick_broadcast_oneshot_control(enum tick_broadcast_state state) 358 + { 359 + struct tick_device *td = this_cpu_ptr(&tick_cpu_device); 360 + 361 + if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP)) 362 + return 0; 363 + 364 + return __tick_broadcast_oneshot_control(state); 365 + } 366 + 346 367 #ifdef CONFIG_HOTPLUG_CPU 347 368 /* 348 369 * Transfer the do_timer job away from a dying cpu.
+10
kernel/time/tick-sched.h
··· 71 71 static inline void tick_cancel_sched_timer(int cpu) { } 72 72 #endif 73 73 74 + #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 75 + extern int __tick_broadcast_oneshot_control(enum tick_broadcast_state state); 76 + #else 77 + static inline int 78 + __tick_broadcast_oneshot_control(enum tick_broadcast_state state) 79 + { 80 + return -EBUSY; 81 + } 82 + #endif 83 + 74 84 #endif