Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'timers-core-2026-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer core updates from Thomas Gleixner:

- Inline timecounter_cyc2time() as that is now used in the networking
hotpath. Inlining it significantly improves performance.

- Optimize the tick dependency check in case that the tracepoint is
disabled, which improves the hotpath performance in the tick
management code, which is a hotpath on transitions in and out of
idle.

- The usual cleanups and improvements

* tag 'timers-core-2026-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
time/kunit: Document handling of negative years of is_leap()
tick/nohz: Optimize check_tick_dependency() with early return
time/sched_clock: Use ACCESS_PRIVATE() to evaluate hrtimer::function
hrtimer: Drop _tv64() helpers
hrtimer: Remove public definition of HIGH_RES_NSEC
hrtimer: Remove unused resolution constants
time/timecounter: Inline timecounter_cyc2time()

+47 -77
-15
include/linux/hrtimer.h
···
112 112 	timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
113 113 }
114 114 
115     - static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
116     - {
117     - 	timer->node.expires = tv64;
118     - 	timer->_softexpires = tv64;
119     - }
120     - 
121 115 static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
122 116 {
123 117 	timer->node.expires = ktime_add_safe(timer->node.expires, time);
···
130 136 }
131 137 
132 138 static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
133     - {
134     - 	return timer->_softexpires;
135     - }
136     - 
137     - static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
138     - {
139     - 	return timer->node.expires;
140     - }
141     - static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
142 139 {
143 140 	return timer->_softexpires;
144 141 }
-20
include/linux/hrtimer_defs.h
···
 6  6 #include <linux/timerqueue.h>
 7  7 #include <linux/seqlock.h>
 8  8 
 9    - #ifdef CONFIG_HIGH_RES_TIMERS
10    - 
11    - /*
12    -  * The resolution of the clocks. The resolution value is returned in
13    -  * the clock_getres() system call to give application programmers an
14    -  * idea of the (in)accuracy of timers. Timer values are rounded up to
15    -  * this resolution values.
16    -  */
17    - # define HIGH_RES_NSEC		1
18    - # define KTIME_HIGH_RES		(HIGH_RES_NSEC)
19    - # define MONOTONIC_RES_NSEC	HIGH_RES_NSEC
20    - # define KTIME_MONOTONIC_RES	KTIME_HIGH_RES
21    - 
22    - #else
23    - 
24    - # define MONOTONIC_RES_NSEC	LOW_RES_NSEC
25    - # define KTIME_MONOTONIC_RES	KTIME_LOW_RES
26    - 
27    - #endif
28    - 
29  9 #ifdef CONFIG_64BIT
30 10 # define __hrtimer_clock_base_align	____cacheline_aligned
31 11 #else
+29 -2
include/linux/timecounter.h
···
115 115  */
116 116 extern u64 timecounter_read(struct timecounter *tc);
117 117 
    118 + /*
    119 +  * This is like cyclecounter_cyc2ns(), but it is used for computing a
    120 +  * time previous to the time stored in the cycle counter.
    121 +  */
    122 + static inline u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, u64 cycles, u64 frac)
    123 + {
    124 + 	return ((cycles * cc->mult) - frac) >> cc->shift;
    125 + }
    126 + 
118 127 /**
119 128  * timecounter_cyc2time - convert a cycle counter to same
120 129  * time base as values returned by
···
140 131  *
141 132  * Returns: cycle counter converted to nanoseconds since the initial time stamp
142 133  */
143     - extern u64 timecounter_cyc2time(const struct timecounter *tc,
144     - 				u64 cycle_tstamp);
    134 + static inline u64 timecounter_cyc2time(const struct timecounter *tc, u64 cycle_tstamp)
    135 + {
    136 + 	const struct cyclecounter *cc = tc->cc;
    137 + 	u64 delta = (cycle_tstamp - tc->cycle_last) & cc->mask;
    138 + 	u64 nsec = tc->nsec, frac = tc->frac;
    139 + 
    140 + 	/*
    141 + 	 * Instead of always treating cycle_tstamp as more recent than
    142 + 	 * tc->cycle_last, detect when it is too far in the future and
    143 + 	 * treat it as old time stamp instead.
    144 + 	 */
    145 + 	if (unlikely(delta > cc->mask / 2)) {
    146 + 		delta = (tc->cycle_last - cycle_tstamp) & cc->mask;
    147 + 		nsec -= cc_cyc2ns_backwards(cc, delta, frac);
    148 + 	} else {
    149 + 		nsec += cyclecounter_cyc2ns(cc, delta, tc->mask, &frac);
    150 + 	}
    151 + 
    152 + 	return nsec;
    153 + }
145 154 
146 155 #endif
+11 -3
kernel/time/hrtimer.c
···
  50   50 #include "tick-internal.h"
  51   51 
  52   52 /*
       53 +  * The resolution of the clocks. The resolution value is returned in
       54 +  * the clock_getres() system call to give application programmers an
       55 +  * idea of the (in)accuracy of timers. Timer values are rounded up to
       56 +  * this resolution values.
       57 +  */
       58 + #define HIGH_RES_NSEC	1
       59 + 
       60 + /*
  53   61  * Masks for selecting the soft and hard context timers from
  54   62  * cpu_base->active
  55   63  */
···
 814  806 	struct hrtimer_clock_base *base = timer->base;
 815  807 	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 816  808 
 817      - 	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
      809 + 	WARN_ON_ONCE(hrtimer_get_expires(timer) < 0);
 818  810 
 819  811 	/*
 820  812 	 * CLOCK_REALTIME timer might be requested with an absolute
···
1061 1053 
1062 1054 		orun = ktime_divns(delta, incr);
1063 1055 		hrtimer_add_expires_ns(timer, incr * orun);
1064      - 		if (hrtimer_get_expires_tv64(timer) > now)
     1056 + 		if (hrtimer_get_expires(timer) > now)
1065 1057 			return orun;
1066 1058 		/*
1067 1059 		 * This (and the ktime_add() below) is the
···
1843 1835 		 * are right-of a not yet expired timer, because that
1844 1836 		 * timer will have to trigger a wakeup anyway.
1845 1837 		 */
1846      - 		if (basenow < hrtimer_get_softexpires_tv64(timer))
     1838 + 		if (basenow < hrtimer_get_softexpires(timer))
1847 1839 			break;
1848 1840 
1849 1841 		__run_hrtimer(cpu_base, base, timer, &basenow, flags);
+1 -1
kernel/time/sched_clock.c
···
215 215 
216 216 	update_clock_read_data(&rd);
217 217 
218     - 	if (sched_clock_timer.function != NULL) {
    218 + 	if (ACCESS_PRIVATE(&sched_clock_timer, function) != NULL) {
219 219 		/* update timeout for clock wrap */
220 220 		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
221 221 			      HRTIMER_MODE_REL_HARD);
+3
kernel/time/tick-sched.c
···
344 344 {
345 345 	int val = atomic_read(dep);
346 346 
    347 + 	if (likely(!tracepoint_enabled(tick_stop)))
    348 + 		return !val;
    349 + 
347 350 	if (val & TICK_DEP_MASK_POSIX_TIMER) {
348 351 		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
349 352 		return true;
+3 -1
kernel/time/time_test.c
···
 4  4 #include <linux/time.h>
 5  5 
 6  6 /*
 7    - * Traditional implementation of leap year evaluation.
    7 + * Traditional implementation of leap year evaluation, but note that long
    8 + * is a signed type and the tests do cover negative year values. So this
    9 + * can't use the is_leap_year() helper from rtc.h.
 8 10  */
 9 11 static bool is_leap(long year)
10 12 {
-35
kernel/time/timecounter.c
···
62 62 }
63 63 EXPORT_SYMBOL_GPL(timecounter_read);
64 64 
65    - /*
66    -  * This is like cyclecounter_cyc2ns(), but it is used for computing a
67    -  * time previous to the time stored in the cycle counter.
68    -  */
69    - static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
70    - 			       u64 cycles, u64 mask, u64 frac)
71    - {
72    - 	u64 ns = (u64) cycles;
73    - 
74    - 	ns = ((ns * cc->mult) - frac) >> cc->shift;
75    - 
76    - 	return ns;
77    - }
78    - 
79    - u64 timecounter_cyc2time(const struct timecounter *tc,
80    - 			 u64 cycle_tstamp)
81    - {
82    - 	u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
83    - 	u64 nsec = tc->nsec, frac = tc->frac;
84    - 
85    - 	/*
86    - 	 * Instead of always treating cycle_tstamp as more recent
87    - 	 * than tc->cycle_last, detect when it is too far in the
88    - 	 * future and treat it as old time stamp instead.
89    - 	 */
90    - 	if (delta > tc->cc->mask / 2) {
91    - 		delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
92    - 		nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);
93    - 	} else {
94    - 		nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);
95    - 	}
96    - 
97    - 	return nsec;
98    - }
99    - EXPORT_SYMBOL_GPL(timecounter_cyc2time);