Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

x86/paravirt: Move paravirt_sched_clock() related code into tsc.c

The only user of paravirt_sched_clock() is in tsc.c, so move the code
from paravirt.c and paravirt.h to tsc.c.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260105110520.21356-13-jgross@suse.com

authored by

Juergen Gross and committed by
Borislav Petkov (AMD)
39965afb 589f41f2

+14 -20
-12
arch/x86/include/asm/paravirt.h
@@ -14,19 +14,7 @@
 #ifndef __ASSEMBLER__
 #include <linux/types.h>
 #include <linux/cpumask.h>
-#include <linux/static_call_types.h>
 #include <asm/frame.h>
-
-u64 dummy_sched_clock(void);
-
-DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);
-
-void paravirt_set_sched_clock(u64 (*func)(void));
-
-static __always_inline u64 paravirt_sched_clock(void)
-{
-	return static_call(pv_sched_clock)();
-}
 
 __visible void __native_queued_spin_unlock(struct qspinlock *lock);
 bool pv_is_native_spin_unlock(void);
+1
arch/x86/include/asm/timer.h
@@ -12,6 +12,7 @@
 extern int no_timer_check;
 
 extern bool using_native_sched_clock(void);
+void paravirt_set_sched_clock(u64 (*func)(void));
 
 /*
  * We use the full linear equation: f(x) = a + b*x, in order to allow
+1
arch/x86/kernel/kvmclock.c
@@ -19,6 +19,7 @@
 #include <linux/cc_platform.h>
 
 #include <asm/hypervisor.h>
+#include <asm/timer.h>
 #include <asm/x86_init.h>
 #include <asm/kvmclock.h>
 
-7
arch/x86/kernel/paravirt.c
@@ -60,13 +60,6 @@
 	static_branch_enable(&virt_spin_lock_key);
 }
 
-DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);
-
-void paravirt_set_sched_clock(u64 (*func)(void))
-{
-	static_call_update(pv_sched_clock, func);
-}
-
 static noinstr void pv_native_safe_halt(void)
 {
 	native_safe_halt();
+9 -1
arch/x86/kernel/tsc.c
@@ -267,19 +267,27 @@
 /* We need to define a real function for sched_clock, to override the
    weak default version */
 #ifdef CONFIG_PARAVIRT
+DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);
+
 noinstr u64 sched_clock_noinstr(void)
 {
-	return paravirt_sched_clock();
+	return static_call(pv_sched_clock)();
 }
 
 bool using_native_sched_clock(void)
 {
 	return static_call_query(pv_sched_clock) == native_sched_clock;
 }
+
+void paravirt_set_sched_clock(u64 (*func)(void))
+{
+	static_call_update(pv_sched_clock, func);
+}
 #else
 u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));
 
 bool using_native_sched_clock(void) { return true; }
+void paravirt_set_sched_clock(u64 (*func)(void)) { }
 #endif
 
 notrace u64 sched_clock(void)
+1
arch/x86/xen/time.c
@@ -19,6 +19,7 @@
 #include <linux/sched/cputime.h>
 
 #include <asm/pvclock.h>
+#include <asm/timer.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/cpuid.h>
+2
drivers/clocksource/hyperv_timer.c
@@ -535,6 +535,8 @@
 	sched_clock_register(sched_clock, 64, NSEC_PER_SEC);
 }
 #elif defined CONFIG_PARAVIRT
+#include <asm/timer.h>
+
 static __always_inline void hv_setup_sched_clock(void *sched_clock)
 {
 	/* We're on x86/x64 *and* using PV ops */