Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'timers-urgent-2024-04-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Ingo Molnar:

- Address a (valid) W=1 build warning

- Fix timer self-tests

- Annotate a KCSAN warning with respect to accesses to the tick_do_timer_cpu
global variable

- Address a !CONFIG_BUG build warning

* tag 'timers-urgent-2024-04-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
selftests: kselftest: Fix build failure with NOLIBC
selftests: timers: Fix abs() warning in posix_timers test
selftests: kselftest: Mark functions that unconditionally call exit() as __noreturn
selftests: timers: Fix posix_timers ksft_print_msg() warning
selftests: timers: Fix valid-adjtimex signed left-shift undefined behavior
bug: Fix no-return-statement warning with !CONFIG_BUG
timekeeping: Use READ/WRITE_ONCE() for tick_do_timer_cpu
selftests/timers/posix_timers: Reimplement check_timer_distribution()
irqflags: Explicitly ignore lockdep_hrtimer_exit() argument

+147 -124
+4 -1
include/asm-generic/bug.h
··· 156 156 157 157 #else /* !CONFIG_BUG */ 158 158 #ifndef HAVE_ARCH_BUG 159 - #define BUG() do {} while (1) 159 + #define BUG() do { \ 160 + do {} while (1); \ 161 + unreachable(); \ 162 + } while (0) 160 163 #endif 161 164 162 165 #ifndef HAVE_ARCH_BUG_ON
+1 -1
include/linux/irqflags.h
··· 114 114 # define lockdep_softirq_enter() do { } while (0) 115 115 # define lockdep_softirq_exit() do { } while (0) 116 116 # define lockdep_hrtimer_enter(__hrtimer) false 117 - # define lockdep_hrtimer_exit(__context) do { } while (0) 117 + # define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0) 118 118 # define lockdep_posixtimer_enter() do { } while (0) 119 119 # define lockdep_posixtimer_exit() do { } while (0) 120 120 # define lockdep_irq_work_enter(__work) do { } while (0)
+9 -8
kernel/time/tick-common.c
··· 7 7 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar 8 8 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner 9 9 */ 10 + #include <linux/compiler.h> 10 11 #include <linux/cpu.h> 11 12 #include <linux/err.h> 12 13 #include <linux/hrtimer.h> ··· 85 84 */ 86 85 static void tick_periodic(int cpu) 87 86 { 88 - if (tick_do_timer_cpu == cpu) { 87 + if (READ_ONCE(tick_do_timer_cpu) == cpu) { 89 88 raw_spin_lock(&jiffies_lock); 90 89 write_seqcount_begin(&jiffies_seq); 91 90 ··· 216 215 * If no cpu took the do_timer update, assign it to 217 216 * this cpu: 218 217 */ 219 - if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) { 220 - tick_do_timer_cpu = cpu; 218 + if (READ_ONCE(tick_do_timer_cpu) == TICK_DO_TIMER_BOOT) { 219 + WRITE_ONCE(tick_do_timer_cpu, cpu); 221 220 tick_next_period = ktime_get(); 222 221 #ifdef CONFIG_NO_HZ_FULL 223 222 /* ··· 233 232 !tick_nohz_full_cpu(cpu)) { 234 233 tick_take_do_timer_from_boot(); 235 234 tick_do_timer_boot_cpu = -1; 236 - WARN_ON(tick_do_timer_cpu != cpu); 235 + WARN_ON(READ_ONCE(tick_do_timer_cpu) != cpu); 237 236 #endif 238 237 } 239 238 ··· 407 406 int tick_cpu_dying(unsigned int dying_cpu) 408 407 { 409 408 /* 410 - * If the current CPU is the timekeeper, it's the only one that 411 - * can safely hand over its duty. Also all online CPUs are in 412 - * stop machine, guaranteed not to be idle, therefore it's safe 413 - * to pick any online successor. 409 + * If the current CPU is the timekeeper, it's the only one that can 410 + * safely hand over its duty. Also all online CPUs are in stop 411 + * machine, guaranteed not to be idle, therefore there is no 412 + * concurrency and it's safe to pick any online successor. 414 413 */ 415 414 if (tick_do_timer_cpu == dying_cpu) 416 415 tick_do_timer_cpu = cpumask_first(cpu_online_mask);
+22 -14
kernel/time/tick-sched.c
··· 8 8 * 9 9 * Started by: Thomas Gleixner and Ingo Molnar 10 10 */ 11 + #include <linux/compiler.h> 11 12 #include <linux/cpu.h> 12 13 #include <linux/err.h> 13 14 #include <linux/hrtimer.h> ··· 205 204 206 205 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) 207 206 { 208 - int cpu = smp_processor_id(); 207 + int tick_cpu, cpu = smp_processor_id(); 209 208 210 209 /* 211 210 * Check if the do_timer duty was dropped. We don't care about ··· 217 216 * If nohz_full is enabled, this should not happen because the 218 217 * 'tick_do_timer_cpu' CPU never relinquishes. 219 218 */ 220 - if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && 221 - unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) { 219 + tick_cpu = READ_ONCE(tick_do_timer_cpu); 220 + 221 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && unlikely(tick_cpu == TICK_DO_TIMER_NONE)) { 222 222 #ifdef CONFIG_NO_HZ_FULL 223 223 WARN_ON_ONCE(tick_nohz_full_running); 224 224 #endif 225 - tick_do_timer_cpu = cpu; 225 + WRITE_ONCE(tick_do_timer_cpu, cpu); 226 + tick_cpu = cpu; 226 227 } 227 228 228 229 /* Check if jiffies need an update */ 229 - if (tick_do_timer_cpu == cpu) 230 + if (tick_cpu == cpu) 230 231 tick_do_update_jiffies64(now); 231 232 232 233 /* ··· 613 610 * timers, workqueues, timekeeping, ...) on behalf of full dynticks 614 611 * CPUs. It must remain online when nohz full is enabled. 615 612 */ 616 - if (tick_nohz_full_running && tick_do_timer_cpu == cpu) 613 + if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu) 617 614 return false; 618 615 return true; 619 616 } ··· 894 891 { 895 892 u64 basemono, next_tick, delta, expires; 896 893 unsigned long basejiff; 894 + int tick_cpu; 897 895 898 896 basemono = get_jiffies_update(&basejiff); 899 897 ts->last_jiffies = basejiff; ··· 951 947 * Otherwise we can sleep as long as we want. 
952 948 */ 953 949 delta = timekeeping_max_deferment(); 954 - if (cpu != tick_do_timer_cpu && 955 - (tick_do_timer_cpu != TICK_DO_TIMER_NONE || 956 - !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST))) 950 + tick_cpu = READ_ONCE(tick_do_timer_cpu); 951 + if (tick_cpu != cpu && 952 + (tick_cpu != TICK_DO_TIMER_NONE || !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST))) 957 953 delta = KTIME_MAX; 958 954 959 955 /* Calculate the next expiry time */ ··· 974 970 unsigned long basejiff = ts->last_jiffies; 975 971 u64 basemono = ts->timer_expires_base; 976 972 bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED); 973 + int tick_cpu; 977 974 u64 expires; 978 975 979 976 /* Make sure we won't be trying to stop it twice in a row. */ ··· 1012 1007 * do_timer() never gets invoked. Keep track of the fact that it 1013 1008 * was the one which had the do_timer() duty last. 1014 1009 */ 1015 - if (cpu == tick_do_timer_cpu) { 1016 - tick_do_timer_cpu = TICK_DO_TIMER_NONE; 1010 + tick_cpu = READ_ONCE(tick_do_timer_cpu); 1011 + if (tick_cpu == cpu) { 1012 + WRITE_ONCE(tick_do_timer_cpu, TICK_DO_TIMER_NONE); 1017 1013 tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST); 1018 - } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { 1014 + } else if (tick_cpu != TICK_DO_TIMER_NONE) { 1019 1015 tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST); 1020 1016 } 1021 1017 ··· 1179 1173 return false; 1180 1174 1181 1175 if (tick_nohz_full_enabled()) { 1176 + int tick_cpu = READ_ONCE(tick_do_timer_cpu); 1177 + 1182 1178 /* 1183 1179 * Keep the tick alive to guarantee timekeeping progression 1184 1180 * if there are full dynticks CPUs around 1185 1181 */ 1186 - if (tick_do_timer_cpu == cpu) 1182 + if (tick_cpu == cpu) 1187 1183 return false; 1188 1184 1189 1185 /* Should not happen for nohz-full */ 1190 - if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) 1186 + if (WARN_ON_ONCE(tick_cpu == TICK_DO_TIMER_NONE)) 1191 1187 return false; 1192 1188 } 1193 1189
+27 -6
tools/testing/selftests/kselftest.h
··· 51 51 #include <stdarg.h> 52 52 #include <string.h> 53 53 #include <stdio.h> 54 + #include <sys/utsname.h> 54 55 #endif 55 56 56 57 #ifndef ARRAY_SIZE ··· 80 79 #define KSFT_XPASS 3 81 80 #define KSFT_SKIP 4 82 81 82 + #ifndef __noreturn 83 + #define __noreturn __attribute__((__noreturn__)) 84 + #endif 83 85 #define __printf(a, b) __attribute__((format(printf, a, b))) 84 86 85 87 /* counters */ ··· 303 299 va_end(args); 304 300 } 305 301 306 - static inline int ksft_exit_pass(void) 302 + static inline __noreturn int ksft_exit_pass(void) 307 303 { 308 304 ksft_print_cnts(); 309 305 exit(KSFT_PASS); 310 306 } 311 307 312 - static inline int ksft_exit_fail(void) 308 + static inline __noreturn int ksft_exit_fail(void) 313 309 { 314 310 ksft_print_cnts(); 315 311 exit(KSFT_FAIL); ··· 336 332 ksft_cnt.ksft_xfail + \ 337 333 ksft_cnt.ksft_xskip) 338 334 339 - static inline __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...) 335 + static inline __noreturn __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...) 340 336 { 341 337 int saved_errno = errno; 342 338 va_list args; ··· 351 347 exit(KSFT_FAIL); 352 348 } 353 349 354 - static inline int ksft_exit_xfail(void) 350 + static inline __noreturn int ksft_exit_xfail(void) 355 351 { 356 352 ksft_print_cnts(); 357 353 exit(KSFT_XFAIL); 358 354 } 359 355 360 - static inline int ksft_exit_xpass(void) 356 + static inline __noreturn int ksft_exit_xpass(void) 361 357 { 362 358 ksft_print_cnts(); 363 359 exit(KSFT_XPASS); 364 360 } 365 361 366 - static inline __printf(1, 2) int ksft_exit_skip(const char *msg, ...) 362 + static inline __noreturn __printf(1, 2) int ksft_exit_skip(const char *msg, ...) 
367 363 { 368 364 int saved_errno = errno; 369 365 va_list args; ··· 390 386 if (ksft_test_num()) 391 387 ksft_print_cnts(); 392 388 exit(KSFT_SKIP); 389 + } 390 + 391 + static inline int ksft_min_kernel_version(unsigned int min_major, 392 + unsigned int min_minor) 393 + { 394 + #ifdef NOLIBC 395 + ksft_print_msg("NOLIBC: Can't check kernel version: Function not implemented\n"); 396 + return 0; 397 + #else 398 + unsigned int major, minor; 399 + struct utsname info; 400 + 401 + if (uname(&info) || sscanf(info.release, "%u.%u.", &major, &minor) != 2) 402 + ksft_exit_fail_msg("Can't parse kernel version\n"); 403 + 404 + return major > min_major || (major == min_major && minor >= min_minor); 405 + #endif 393 406 } 394 407 395 408 #endif /* __KSELFTEST_H */
+48 -57
tools/testing/selftests/timers/posix_timers.c
··· 66 66 diff = end.tv_usec - start.tv_usec; 67 67 diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC; 68 68 69 - if (abs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) { 69 + if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) { 70 70 printf("Diff too high: %lld..", diff); 71 71 return -1; 72 72 } ··· 184 184 return 0; 185 185 } 186 186 187 - int remain; 188 - __thread int got_signal; 187 + static pthread_t ctd_thread; 188 + static volatile int ctd_count, ctd_failed; 189 189 190 - static void *distribution_thread(void *arg) 190 + static void ctd_sighandler(int sig) 191 191 { 192 - while (__atomic_load_n(&remain, __ATOMIC_RELAXED)); 193 - return NULL; 192 + if (pthread_self() != ctd_thread) 193 + ctd_failed = 1; 194 + ctd_count--; 194 195 } 195 196 196 - static void distribution_handler(int nr) 197 + static void *ctd_thread_func(void *arg) 197 198 { 198 - if (!__atomic_exchange_n(&got_signal, 1, __ATOMIC_RELAXED)) 199 - __atomic_fetch_sub(&remain, 1, __ATOMIC_RELAXED); 200 - } 201 - 202 - /* 203 - * Test that all running threads _eventually_ receive CLOCK_PROCESS_CPUTIME_ID 204 - * timer signals. This primarily tests that the kernel does not favour any one. 
205 - */ 206 - static int check_timer_distribution(void) 207 - { 208 - int err, i; 209 - timer_t id; 210 - const int nthreads = 10; 211 - pthread_t threads[nthreads]; 212 199 struct itimerspec val = { 213 200 .it_value.tv_sec = 0, 214 201 .it_value.tv_nsec = 1000 * 1000, 215 202 .it_interval.tv_sec = 0, 216 203 .it_interval.tv_nsec = 1000 * 1000, 217 204 }; 205 + timer_t id; 218 206 219 - remain = nthreads + 1; /* worker threads + this thread */ 220 - signal(SIGALRM, distribution_handler); 221 - err = timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id); 222 - if (err < 0) { 223 - ksft_perror("Can't create timer"); 224 - return -1; 225 - } 226 - err = timer_settime(id, 0, &val, NULL); 227 - if (err < 0) { 228 - ksft_perror("Can't set timer"); 229 - return -1; 230 - } 207 + /* 1/10 seconds to ensure the leader sleeps */ 208 + usleep(10000); 231 209 232 - for (i = 0; i < nthreads; i++) { 233 - err = pthread_create(&threads[i], NULL, distribution_thread, 234 - NULL); 235 - if (err) { 236 - ksft_print_msg("Can't create thread: %s (%d)\n", 237 - strerror(errno), errno); 238 - return -1; 239 - } 240 - } 210 + ctd_count = 100; 211 + if (timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id)) 212 + return "Can't create timer\n"; 213 + if (timer_settime(id, 0, &val, NULL)) 214 + return "Can't set timer\n"; 241 215 242 - /* Wait for all threads to receive the signal. 
*/ 243 - while (__atomic_load_n(&remain, __ATOMIC_RELAXED)); 216 + while (ctd_count > 0 && !ctd_failed) 217 + ; 244 218 245 - for (i = 0; i < nthreads; i++) { 246 - err = pthread_join(threads[i], NULL); 247 - if (err) { 248 - ksft_print_msg("Can't join thread: %s (%d)\n", 249 - strerror(errno), errno); 250 - return -1; 251 - } 252 - } 219 + if (timer_delete(id)) 220 + return "Can't delete timer\n"; 253 221 254 - if (timer_delete(id)) { 255 - ksft_perror("Can't delete timer"); 256 - return -1; 257 - } 222 + return NULL; 223 + } 258 224 259 - ksft_test_result_pass("check_timer_distribution\n"); 225 + /* 226 + * Test that only the running thread receives the timer signal. 227 + */ 228 + static int check_timer_distribution(void) 229 + { 230 + const char *errmsg; 231 + 232 + signal(SIGALRM, ctd_sighandler); 233 + 234 + errmsg = "Can't create thread\n"; 235 + if (pthread_create(&ctd_thread, NULL, ctd_thread_func, NULL)) 236 + goto err; 237 + 238 + errmsg = "Can't join thread\n"; 239 + if (pthread_join(ctd_thread, (void **)&errmsg) || errmsg) 240 + goto err; 241 + 242 + if (!ctd_failed) 243 + ksft_test_result_pass("check signal distribution\n"); 244 + else if (ksft_min_kernel_version(6, 3)) 245 + ksft_test_result_fail("check signal distribution\n"); 246 + else 247 + ksft_test_result_skip("check signal distribution (old kernel)\n"); 260 248 return 0; 249 + err: 250 + ksft_print_msg("%s", errmsg); 251 + return -1; 261 252 } 262 253 263 254 int main(int argc, char **argv)
+36 -37
tools/testing/selftests/timers/valid-adjtimex.c
··· 21 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 22 * GNU General Public License for more details. 23 23 */ 24 - 25 - 26 - 27 24 #include <stdio.h> 28 25 #include <stdlib.h> 29 26 #include <time.h> ··· 59 62 #define NUM_FREQ_OUTOFRANGE 4 60 63 #define NUM_FREQ_INVALID 2 61 64 65 + #define SHIFTED_PPM (1 << 16) 66 + 62 67 long valid_freq[NUM_FREQ_VALID] = { 63 - -499<<16, 64 - -450<<16, 65 - -400<<16, 66 - -350<<16, 67 - -300<<16, 68 - -250<<16, 69 - -200<<16, 70 - -150<<16, 71 - -100<<16, 72 - -75<<16, 73 - -50<<16, 74 - -25<<16, 75 - -10<<16, 76 - -5<<16, 77 - -1<<16, 68 + -499 * SHIFTED_PPM, 69 + -450 * SHIFTED_PPM, 70 + -400 * SHIFTED_PPM, 71 + -350 * SHIFTED_PPM, 72 + -300 * SHIFTED_PPM, 73 + -250 * SHIFTED_PPM, 74 + -200 * SHIFTED_PPM, 75 + -150 * SHIFTED_PPM, 76 + -100 * SHIFTED_PPM, 77 + -75 * SHIFTED_PPM, 78 + -50 * SHIFTED_PPM, 79 + -25 * SHIFTED_PPM, 80 + -10 * SHIFTED_PPM, 81 + -5 * SHIFTED_PPM, 82 + -1 * SHIFTED_PPM, 78 83 -1000, 79 - 1<<16, 80 - 5<<16, 81 - 10<<16, 82 - 25<<16, 83 - 50<<16, 84 - 75<<16, 85 - 100<<16, 86 - 150<<16, 87 - 200<<16, 88 - 250<<16, 89 - 300<<16, 90 - 350<<16, 91 - 400<<16, 92 - 450<<16, 93 - 499<<16, 84 + 1 * SHIFTED_PPM, 85 + 5 * SHIFTED_PPM, 86 + 10 * SHIFTED_PPM, 87 + 25 * SHIFTED_PPM, 88 + 50 * SHIFTED_PPM, 89 + 75 * SHIFTED_PPM, 90 + 100 * SHIFTED_PPM, 91 + 150 * SHIFTED_PPM, 92 + 200 * SHIFTED_PPM, 93 + 250 * SHIFTED_PPM, 94 + 300 * SHIFTED_PPM, 95 + 350 * SHIFTED_PPM, 96 + 400 * SHIFTED_PPM, 97 + 450 * SHIFTED_PPM, 98 + 499 * SHIFTED_PPM, 94 99 }; 95 100 96 101 long outofrange_freq[NUM_FREQ_OUTOFRANGE] = { 97 - -1000<<16, 98 - -550<<16, 99 - 550<<16, 100 - 1000<<16, 102 + -1000 * SHIFTED_PPM, 103 + -550 * SHIFTED_PPM, 104 + 550 * SHIFTED_PPM, 105 + 1000 * SHIFTED_PPM, 101 106 }; 102 107 103 108 #define LONG_MAX (~0UL>>1)