Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'timers-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timekeeping and timer updates from Thomas Gleixner:
"Core:

- Consolidation of the vDSO build infrastructure to address the
difficulties of cross-builds for ARM64 compat vDSO libraries by
restricting the exposure of header content to the vDSO build.

This is achieved by splitting out header content into separate
headers which contain only the minimally required information which
is necessary to build the vDSO. These new headers are included from
the kernel headers and the vDSO specific files.

- Enhancements to the generic vDSO library allowing more fine grained
control over the compiled in code, further reducing architecture
specific storage and preparing for adopting the generic library by
PPC.

- Cleanup and consolidation of the exit related code in posix CPU
timers.

- Small cleanups and enhancements here and there

Drivers:

- The obligatory new drivers: Ingenic JZ47xx and X1000 TCU support

- Correct the clock rate of PIT64b global clock

- setup_irq() cleanup

- Preparation for PWM and suspend support for the TI DM timer

- Expand the fttmr010 driver to support ast2600 systems

- The usual small fixes, enhancements and cleanups all over the
place"

* tag 'timers-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (80 commits)
Revert "clocksource/drivers/timer-probe: Avoid creating dead devices"
vdso: Fix clocksource.h macro detection
um: Fix header inclusion
arm64: vdso32: Enable Clang Compilation
lib/vdso: Enable common headers
arm: vdso: Enable arm to use common headers
x86/vdso: Enable x86 to use common headers
mips: vdso: Enable mips to use common headers
arm64: vdso32: Include common headers in the vdso library
arm64: vdso: Include common headers in the vdso library
arm64: Introduce asm/vdso/processor.h
arm64: vdso32: Code clean up
linux/elfnote.h: Replace elf.h with UAPI equivalent
scripts: Fix the inclusion order in modpost
common: Introduce processor.h
linux/ktime.h: Extract common header for vDSO
linux/jiffies.h: Extract common header for vDSO
linux/time64.h: Extract common header for vDSO
linux/time32.h: Extract common header for vDSO
linux/time.h: Extract common header for vDSO
...

+1290 -928
+1
Documentation/devicetree/bindings/timer/faraday,fttmr010.txt
··· 11 11 "moxa,moxart-timer", "faraday,fttmr010" 12 12 "aspeed,ast2400-timer" 13 13 "aspeed,ast2500-timer" 14 + "aspeed,ast2600-timer" 14 15 15 16 - reg : Should contain registers location and length 16 17 - interrupts : Should contain the three timer interrupts usually with
+1
Documentation/devicetree/bindings/timer/ingenic,tcu.txt
··· 10 10 * ingenic,jz4740-tcu 11 11 * ingenic,jz4725b-tcu 12 12 * ingenic,jz4770-tcu 13 + * ingenic,x1000-tcu 13 14 followed by "simple-mfd". 14 15 - reg: Should be the offset/length value corresponding to the TCU registers 15 16 - clocks: List of phandle & clock specifiers for clocks external to the TCU.
-1
arch/arm/Kconfig
··· 3 3 bool 4 4 default y 5 5 select ARCH_32BIT_OFF_T 6 - select ARCH_CLOCKSOURCE_DATA 7 6 select ARCH_HAS_BINFMT_FLAT 8 7 select ARCH_HAS_DEBUG_VIRTUAL if MMU 9 8 select ARCH_HAS_DEVMEM_IS_ALLOWED
+3 -4
arch/arm/include/asm/clocksource.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 1 2 #ifndef _ASM_CLOCKSOURCE_H 2 3 #define _ASM_CLOCKSOURCE_H 3 4 4 - struct arch_clocksource_data { 5 - bool vdso_direct; /* Usable for direct VDSO access? */ 6 - }; 5 + #include <asm/vdso/clocksource.h> 7 6 8 - #endif 7 + #endif /* _ASM_CLOCKSOURCE_H */
+1 -19
arch/arm/include/asm/cp15.h
··· 50 50 51 51 #ifdef CONFIG_CPU_CP15 52 52 53 - #define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ 54 - "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 55 - #define __ACCESS_CP15_64(Op1, CRm) \ 56 - "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 57 - 58 - #define __read_sysreg(r, w, c, t) ({ \ 59 - t __val; \ 60 - asm volatile(r " " c : "=r" (__val)); \ 61 - __val; \ 62 - }) 63 - #define read_sysreg(...) __read_sysreg(__VA_ARGS__) 64 - 65 - #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) 66 - #define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) 67 - 68 - #define BPIALL __ACCESS_CP15(c7, 0, c5, 6) 69 - #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0) 70 - 71 - #define CNTVCT __ACCESS_CP15_64(1, c14) 53 + #include <asm/vdso/cp15.h> 72 54 73 55 extern unsigned long cr_alignment; /* defined in entry-armv.S */ 74 56
+1 -10
arch/arm/include/asm/processor.h
··· 14 14 #include <asm/ptrace.h> 15 15 #include <asm/types.h> 16 16 #include <asm/unified.h> 17 + #include <asm/vdso/processor.h> 17 18 18 19 #ifdef __KERNEL__ 19 20 #define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \ ··· 85 84 extern void release_thread(struct task_struct *); 86 85 87 86 unsigned long get_wchan(struct task_struct *p); 88 - 89 - #if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327) 90 - #define cpu_relax() \ 91 - do { \ 92 - smp_mb(); \ 93 - __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \ 94 - } while (0) 95 - #else 96 - #define cpu_relax() barrier() 97 - #endif 98 87 99 88 #define task_pt_regs(p) \ 100 89 ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
+8
arch/arm/include/asm/vdso/clocksource.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __ASM_VDSOCLOCKSOURCE_H 3 + #define __ASM_VDSOCLOCKSOURCE_H 4 + 5 + #define VDSO_ARCH_CLOCKMODES \ 6 + VDSO_CLOCKMODE_ARCHTIMER 7 + 8 + #endif /* __ASM_VDSOCLOCKSOURCE_H */
+38
arch/arm/include/asm/vdso/cp15.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2020 ARM Ltd. 4 + */ 5 + #ifndef __ASM_VDSO_CP15_H 6 + #define __ASM_VDSO_CP15_H 7 + 8 + #ifndef __ASSEMBLY__ 9 + 10 + #ifdef CONFIG_CPU_CP15 11 + 12 + #include <linux/stringify.h> 13 + 14 + #define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ 15 + "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 16 + #define __ACCESS_CP15_64(Op1, CRm) \ 17 + "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 18 + 19 + #define __read_sysreg(r, w, c, t) ({ \ 20 + t __val; \ 21 + asm volatile(r " " c : "=r" (__val)); \ 22 + __val; \ 23 + }) 24 + #define read_sysreg(...) __read_sysreg(__VA_ARGS__) 25 + 26 + #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) 27 + #define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) 28 + 29 + #define BPIALL __ACCESS_CP15(c7, 0, c5, 6) 30 + #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0) 31 + 32 + #define CNTVCT __ACCESS_CP15_64(1, c14) 33 + 34 + #endif /* CONFIG_CPU_CP15 */ 35 + 36 + #endif /* __ASSEMBLY__ */ 37 + 38 + #endif /* __ASM_VDSO_CP15_H */
+17 -5
arch/arm/include/asm/vdso/gettimeofday.h
··· 7 7 8 8 #ifndef __ASSEMBLY__ 9 9 10 - #include <asm/barrier.h> 11 - #include <asm/cp15.h> 10 + #include <asm/errno.h> 12 11 #include <asm/unistd.h> 12 + #include <asm/vdso/cp15.h> 13 13 #include <uapi/linux/time.h> 14 14 15 15 #define VDSO_HAS_CLOCK_GETRES 1 ··· 106 106 return ret; 107 107 } 108 108 109 + static inline bool arm_vdso_hres_capable(void) 110 + { 111 + return IS_ENABLED(CONFIG_ARM_ARCH_TIMER); 112 + } 113 + #define __arch_vdso_hres_capable arm_vdso_hres_capable 114 + 109 115 static __always_inline u64 __arch_get_hw_counter(int clock_mode) 110 116 { 111 117 #ifdef CONFIG_ARM_ARCH_TIMER 112 118 u64 cycle_now; 113 119 114 - if (!clock_mode) 115 - return -EINVAL; 120 + /* 121 + * Core checks for mode already, so this raced against a concurrent 122 + * update. Return something. Core will do another round and then 123 + * see the mode change and fallback to the syscall. 124 + */ 125 + if (clock_mode == VDSO_CLOCKMODE_NONE) 126 + return 0; 116 127 117 128 isb(); 118 129 cycle_now = read_sysreg(CNTVCT); 119 130 120 131 return cycle_now; 121 132 #else 122 - return -EINVAL; /* use fallback */ 133 + /* Make GCC happy. This is compiled out anyway */ 134 + return 0; 123 135 #endif 124 136 } 125 137
+22
arch/arm/include/asm/vdso/processor.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2020 ARM Ltd. 4 + */ 5 + #ifndef __ASM_VDSO_PROCESSOR_H 6 + #define __ASM_VDSO_PROCESSOR_H 7 + 8 + #ifndef __ASSEMBLY__ 9 + 10 + #if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327) 11 + #define cpu_relax() \ 12 + do { \ 13 + smp_mb(); \ 14 + __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \ 15 + } while (0) 16 + #else 17 + #define cpu_relax() barrier() 18 + #endif 19 + 20 + #endif /* __ASSEMBLY__ */ 21 + 22 + #endif /* __ASM_VDSO_PROCESSOR_H */
-35
arch/arm/include/asm/vdso/vsyscall.h
··· 11 11 extern struct vdso_data *vdso_data; 12 12 extern bool cntvct_ok; 13 13 14 - static __always_inline 15 - bool tk_is_cntvct(const struct timekeeper *tk) 16 - { 17 - if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) 18 - return false; 19 - 20 - if (!tk->tkr_mono.clock->archdata.vdso_direct) 21 - return false; 22 - 23 - return true; 24 - } 25 - 26 14 /* 27 15 * Update the vDSO data page to keep in sync with kernel timekeeping. 28 16 */ ··· 20 32 return vdso_data; 21 33 } 22 34 #define __arch_get_k_vdso_data __arm_get_k_vdso_data 23 - 24 - static __always_inline 25 - bool __arm_update_vdso_data(void) 26 - { 27 - return cntvct_ok; 28 - } 29 - #define __arch_update_vdso_data __arm_update_vdso_data 30 - 31 - static __always_inline 32 - int __arm_get_clock_mode(struct timekeeper *tk) 33 - { 34 - u32 __tk_is_cntvct = tk_is_cntvct(tk); 35 - 36 - return __tk_is_cntvct; 37 - } 38 - #define __arch_get_clock_mode __arm_get_clock_mode 39 - 40 - static __always_inline 41 - int __arm_use_vsyscall(struct vdso_data *vdata) 42 - { 43 - return vdata[CS_HRES_COARSE].clock_mode; 44 - } 45 - #define __arch_use_vsyscall __arm_use_vsyscall 46 35 47 36 static __always_inline 48 37 void __arm_sync_vdso_data(struct vdso_data *vdata)
-1
arch/arm64/Kconfig
··· 9 9 select ACPI_MCFG if (ACPI && PCI) 10 10 select ACPI_SPCR_TABLE if ACPI 11 11 select ACPI_PPTT if ACPI 12 - select ARCH_CLOCKSOURCE_DATA 13 12 select ARCH_HAS_DEBUG_VIRTUAL 14 13 select ARCH_HAS_DEVMEM_IS_ALLOWED 15 14 select ARCH_HAS_DMA_PREP_COHERENT
+1 -3
arch/arm64/include/asm/clocksource.h
··· 2 2 #ifndef _ASM_CLOCKSOURCE_H 3 3 #define _ASM_CLOCKSOURCE_H 4 4 5 - struct arch_clocksource_data { 6 - bool vdso_direct; /* Usable for direct VDSO access? */ 7 - }; 5 + #include <asm/vdso/clocksource.h> 8 6 9 7 #endif
+2 -5
arch/arm64/include/asm/processor.h
··· 28 28 #include <linux/string.h> 29 29 #include <linux/thread_info.h> 30 30 31 + #include <vdso/processor.h> 32 + 31 33 #include <asm/alternative.h> 32 34 #include <asm/cpufeature.h> 33 35 #include <asm/hw_breakpoint.h> ··· 257 255 extern void release_thread(struct task_struct *); 258 256 259 257 unsigned long get_wchan(struct task_struct *p); 260 - 261 - static inline void cpu_relax(void) 262 - { 263 - asm volatile("yield" ::: "memory"); 264 - } 265 258 266 259 /* Thread switching */ 267 260 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
+8
arch/arm64/include/asm/vdso/clocksource.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __ASM_VDSOCLOCKSOURCE_H 3 + #define __ASM_VDSOCLOCKSOURCE_H 4 + 5 + #define VDSO_ARCH_CLOCKMODES \ 6 + VDSO_CLOCKMODE_ARCHTIMER 7 + 8 + #endif
+6 -15
arch/arm64/include/asm/vdso/compat_gettimeofday.h
··· 8 8 #ifndef __ASSEMBLY__ 9 9 10 10 #include <asm/unistd.h> 11 - #include <uapi/linux/time.h> 11 + #include <asm/errno.h> 12 12 13 13 #include <asm/vdso/compat_barrier.h> 14 - 15 - #define __VDSO_USE_SYSCALL ULLONG_MAX 16 14 17 15 #define VDSO_HAS_CLOCK_GETRES 1 18 16 ··· 76 78 register long ret asm ("r0"); 77 79 register long nr asm("r7") = __NR_compat_clock_getres_time64; 78 80 79 - /* The checks below are required for ABI consistency with arm */ 80 - if ((_clkid >= MAX_CLOCKS) && (_ts == NULL)) 81 - return -EINVAL; 82 - 83 81 asm volatile( 84 82 " swi #0\n" 85 83 : "=r" (ret) ··· 93 99 register long ret asm ("r0"); 94 100 register long nr asm("r7") = __NR_compat_clock_getres; 95 101 96 - /* The checks below are required for ABI consistency with arm */ 97 - if ((_clkid >= MAX_CLOCKS) && (_ts == NULL)) 98 - return -EINVAL; 99 - 100 102 asm volatile( 101 103 " swi #0\n" 102 104 : "=r" (ret) ··· 107 117 u64 res; 108 118 109 119 /* 110 - * clock_mode == 0 implies that vDSO are enabled otherwise 111 - * fallback on syscall. 120 + * Core checks for mode already, so this raced against a concurrent 121 + * update. Return something. Core will do another round and then 122 + * see the mode change and fallback to the syscall. 112 123 */ 113 - if (clock_mode) 114 - return __VDSO_USE_SYSCALL; 124 + if (clock_mode == VDSO_CLOCKMODE_NONE) 125 + return 0; 115 126 116 127 /* 117 128 * This isb() is required to prevent that the counter value
+5 -7
arch/arm64/include/asm/vdso/gettimeofday.h
··· 8 8 #ifndef __ASSEMBLY__ 9 9 10 10 #include <asm/unistd.h> 11 - #include <uapi/linux/time.h> 12 - 13 - #define __VDSO_USE_SYSCALL ULLONG_MAX 14 11 15 12 #define VDSO_HAS_CLOCK_GETRES 1 16 13 ··· 68 71 u64 res; 69 72 70 73 /* 71 - * clock_mode == 0 implies that vDSO are enabled otherwise 72 - * fallback on syscall. 74 + * Core checks for mode already, so this raced against a concurrent 75 + * update. Return something. Core will do another round and then 76 + * see the mode change and fallback to the syscall. 73 77 */ 74 - if (clock_mode) 75 - return __VDSO_USE_SYSCALL; 78 + if (clock_mode == VDSO_CLOCKMODE_NONE) 79 + return 0; 76 80 77 81 /* 78 82 * This isb() is required to prevent that the counter value
+17
arch/arm64/include/asm/vdso/processor.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2020 ARM Ltd. 4 + */ 5 + #ifndef __ASM_VDSO_PROCESSOR_H 6 + #define __ASM_VDSO_PROCESSOR_H 7 + 8 + #ifndef __ASSEMBLY__ 9 + 10 + static inline void cpu_relax(void) 11 + { 12 + asm volatile("yield" ::: "memory"); 13 + } 14 + 15 + #endif /* __ASSEMBLY__ */ 16 + 17 + #endif /* __ASM_VDSO_PROCESSOR_H */
-9
arch/arm64/include/asm/vdso/vsyscall.h
··· 22 22 #define __arch_get_k_vdso_data __arm64_get_k_vdso_data 23 23 24 24 static __always_inline 25 - int __arm64_get_clock_mode(struct timekeeper *tk) 26 - { 27 - u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct; 28 - 29 - return use_syscall; 30 - } 31 - #define __arch_get_clock_mode __arm64_get_clock_mode 32 - 33 - static __always_inline 34 25 void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk) 35 26 { 36 27 vdata[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK;
-2
arch/arm64/kernel/vdso/vgettimeofday.c
··· 5 5 * Copyright (C) 2018 ARM Limited 6 6 * 7 7 */ 8 - #include <linux/time.h> 9 - #include <linux/types.h> 10 8 11 9 int __kernel_clock_gettime(clockid_t clock, 12 10 struct __kernel_timespec *ts)
+11
arch/arm64/kernel/vdso32/Makefile
··· 10 10 11 11 # Same as cc-*option, but using CC_COMPAT instead of CC 12 12 ifeq ($(CONFIG_CC_IS_CLANG), y) 13 + COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit)) 14 + COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..) 15 + 16 + CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) 17 + CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR) 18 + CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments 19 + ifneq ($(COMPAT_GCC_TOOLCHAIN),) 20 + CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN) 21 + endif 22 + 13 23 CC_COMPAT ?= $(CC) 24 + CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS) 14 25 else 15 26 CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc 16 27 endif
-14
arch/arm64/kernel/vdso32/vgettimeofday.c
··· 5 5 * Copyright (C) 2018 ARM Limited 6 6 * 7 7 */ 8 - #include <linux/time.h> 9 - #include <linux/types.h> 10 8 11 9 int __vdso_clock_gettime(clockid_t clock, 12 10 struct old_timespec32 *ts) 13 11 { 14 - /* The checks below are required for ABI consistency with arm */ 15 - if ((u32)ts >= TASK_SIZE_32) 16 - return -EFAULT; 17 - 18 12 return __cvdso_clock_gettime32(clock, ts); 19 13 } 20 14 21 15 int __vdso_clock_gettime64(clockid_t clock, 22 16 struct __kernel_timespec *ts) 23 17 { 24 - /* The checks below are required for ABI consistency with arm */ 25 - if ((u32)ts >= TASK_SIZE_32) 26 - return -EFAULT; 27 - 28 18 return __cvdso_clock_gettime(clock, ts); 29 19 } 30 20 ··· 27 37 int __vdso_clock_getres(clockid_t clock_id, 28 38 struct old_timespec32 *res) 29 39 { 30 - /* The checks below are required for ABI consistency with arm */ 31 - if ((u32)res >= TASK_SIZE_32) 32 - return -EFAULT; 33 - 34 40 return __cvdso_clock_getres_time32(clock_id, res); 35 41 } 36 42
-1
arch/mips/Kconfig
··· 4 4 default y 5 5 select ARCH_32BIT_OFF_T if !64BIT 6 6 select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT 7 - select ARCH_CLOCKSOURCE_DATA 8 7 select ARCH_HAS_FORTIFY_SOURCE 9 8 select ARCH_HAS_KCOV 10 9 select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
+1 -15
arch/mips/include/asm/clocksource.h
··· 3 3 * Copyright (C) 2015 Imagination Technologies 4 4 * Author: Alex Smith <alex.smith@imgtec.com> 5 5 */ 6 - 7 6 #ifndef __ASM_CLOCKSOURCE_H 8 7 #define __ASM_CLOCKSOURCE_H 9 8 10 - #include <linux/types.h> 11 - 12 - /* VDSO clocksources. */ 13 - #define VDSO_CLOCK_NONE 0 /* No suitable clocksource. */ 14 - #define VDSO_CLOCK_R4K 1 /* Use the coprocessor 0 count. */ 15 - #define VDSO_CLOCK_GIC 2 /* Use the GIC. */ 16 - 17 - /** 18 - * struct arch_clocksource_data - Architecture-specific clocksource information. 19 - * @vdso_clock_mode: Method the VDSO should use to access the clocksource. 20 - */ 21 - struct arch_clocksource_data { 22 - u8 vdso_clock_mode; 23 - }; 9 + #include <asm/vdso/clocksource.h> 24 10 25 11 #endif /* __ASM_CLOCKSOURCE_H */
+1 -15
arch/mips/include/asm/processor.h
··· 22 22 #include <asm/dsemul.h> 23 23 #include <asm/mipsregs.h> 24 24 #include <asm/prefetch.h> 25 + #include <asm/vdso/processor.h> 25 26 26 27 /* 27 28 * System setup and hardware flags.. ··· 385 384 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc) 386 385 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29]) 387 386 #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status) 388 - 389 - #ifdef CONFIG_CPU_LOONGSON64 390 - /* 391 - * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a 392 - * tight read loop is executed, because reads take priority over writes & the 393 - * hardware (incorrectly) doesn't ensure that writes will eventually occur. 394 - * 395 - * Since spin loops of any kind should have a cpu_relax() in them, force an SFB 396 - * flush from cpu_relax() such that any pending writes will become visible as 397 - * expected. 398 - */ 399 - #define cpu_relax() smp_mb() 400 - #else 401 - #define cpu_relax() barrier() 402 - #endif 403 387 404 388 /* 405 389 * Return_address is a replacement for __builtin_return_address(count)
+9
arch/mips/include/asm/vdso/clocksource.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + #ifndef __ASM_VDSOCLOCKSOURCE_H 3 + #define __ASM_VDSOCLOCKSOURCE_H 4 + 5 + #define VDSO_ARCH_CLOCKMODES \ 6 + VDSO_CLOCKMODE_R4K, \ 7 + VDSO_CLOCKMODE_GIC 8 + 9 + #endif /* __ASM_VDSOCLOCKSOURCE_H */
+17 -24
arch/mips/include/asm/vdso/gettimeofday.h
··· 13 13 14 14 #ifndef __ASSEMBLY__ 15 15 16 - #include <linux/compiler.h> 17 - #include <linux/time.h> 18 - 19 16 #include <asm/vdso/vdso.h> 20 17 #include <asm/clocksource.h> 21 - #include <asm/io.h> 22 18 #include <asm/unistd.h> 23 19 #include <asm/vdso.h> 24 20 25 21 #define VDSO_HAS_CLOCK_GETRES 1 26 - 27 - #define __VDSO_USE_SYSCALL ULLONG_MAX 28 22 29 23 static __always_inline long gettimeofday_fallback( 30 24 struct __kernel_old_timeval *_tv, ··· 169 175 170 176 static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) 171 177 { 172 - #ifdef CONFIG_CLKSRC_MIPS_GIC 173 - const struct vdso_data *data = get_vdso_data(); 174 - #endif 175 - u64 cycle_now; 176 - 177 - switch (clock_mode) { 178 178 #ifdef CONFIG_CSRC_R4K 179 - case VDSO_CLOCK_R4K: 180 - cycle_now = read_r4k_count(); 181 - break; 179 + if (clock_mode == VDSO_CLOCKMODE_R4K) 180 + return read_r4k_count(); 182 181 #endif 183 182 #ifdef CONFIG_CLKSRC_MIPS_GIC 184 - case VDSO_CLOCK_GIC: 185 - cycle_now = read_gic_count(data); 186 - break; 183 + if (clock_mode == VDSO_CLOCKMODE_GIC) 184 + return read_gic_count(get_vdso_data()); 187 185 #endif 188 - default: 189 - cycle_now = __VDSO_USE_SYSCALL; 190 - break; 191 - } 192 - 193 - return cycle_now; 186 + /* 187 + * Core checks mode already. So this raced against a concurrent 188 + * update. Return something. Core will do another round see the 189 + * change and fallback to syscall. 190 + */ 191 + return 0; 194 192 } 193 + 194 + static inline bool mips_vdso_hres_capable(void) 195 + { 196 + return IS_ENABLED(CONFIG_CSRC_R4K) || 197 + IS_ENABLED(CONFIG_CLKSRC_MIPS_GIC); 198 + } 199 + #define __arch_vdso_hres_capable mips_vdso_hres_capable 195 200 196 201 static __always_inline const struct vdso_data *__arch_get_vdso_data(void) 197 202 {
+27
arch/mips/include/asm/vdso/processor.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2020 ARM Ltd. 4 + */ 5 + #ifndef __ASM_VDSO_PROCESSOR_H 6 + #define __ASM_VDSO_PROCESSOR_H 7 + 8 + #ifndef __ASSEMBLY__ 9 + 10 + #ifdef CONFIG_CPU_LOONGSON64 11 + /* 12 + * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a 13 + * tight read loop is executed, because reads take priority over writes & the 14 + * hardware (incorrectly) doesn't ensure that writes will eventually occur. 15 + * 16 + * Since spin loops of any kind should have a cpu_relax() in them, force an SFB 17 + * flush from cpu_relax() such that any pending writes will become visible as 18 + * expected. 19 + */ 20 + #define cpu_relax() smp_mb() 21 + #else 22 + #define cpu_relax() barrier() 23 + #endif 24 + 25 + #endif /* __ASSEMBLY__ */ 26 + 27 + #endif /* __ASM_VDSO_PROCESSOR_H */
-9
arch/mips/include/asm/vdso/vsyscall.h
··· 19 19 } 20 20 #define __arch_get_k_vdso_data __mips_get_k_vdso_data 21 21 22 - static __always_inline 23 - int __mips_get_clock_mode(struct timekeeper *tk) 24 - { 25 - u32 clock_mode = tk->tkr_mono.clock->archdata.vdso_clock_mode; 26 - 27 - return clock_mode; 28 - } 29 - #define __arch_get_clock_mode __mips_get_clock_mode 30 - 31 22 /* The asm-generic header needs to be included after the definitions above */ 32 23 #include <asm-generic/vdso/vsyscall.h> 33 24
+1 -1
arch/mips/kernel/csrc-r4k.c
··· 78 78 * by the VDSO (HWREna is configured by configure_hwrena()). 79 79 */ 80 80 if (cpu_has_mips_r2_r6 && rdhwr_count_usable()) 81 - clocksource_mips.archdata.vdso_clock_mode = VDSO_CLOCK_R4K; 81 + clocksource_mips.vdso_clock_mode = VDSO_CLOCKMODE_R4K; 82 82 83 83 clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); 84 84
-1
arch/x86/Kconfig
··· 57 57 select ACPI_LEGACY_TABLES_LOOKUP if ACPI 58 58 select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI 59 59 select ARCH_32BIT_OFF_T if X86_32 60 - select ARCH_CLOCKSOURCE_DATA 61 60 select ARCH_CLOCKSOURCE_INIT 62 61 select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI 63 62 select ARCH_HAS_DEBUG_VIRTUAL
+6 -2
arch/x86/entry/vdso/vma.c
··· 38 38 } 39 39 #undef EMIT_VVAR 40 40 41 + unsigned int vclocks_used __read_mostly; 42 + 41 43 #if defined(CONFIG_X86_64) 42 44 unsigned int __read_mostly vdso64_enabled = 1; 43 45 #endif ··· 221 219 } else if (sym_offset == image->sym_pvclock_page) { 222 220 struct pvclock_vsyscall_time_info *pvti = 223 221 pvclock_get_pvti_cpu0_va(); 224 - if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) { 222 + if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) { 225 223 return vmf_insert_pfn_prot(vma, vmf->address, 226 224 __pa(pvti) >> PAGE_SHIFT, 227 225 pgprot_decrypted(vma->vm_page_prot)); ··· 229 227 } else if (sym_offset == image->sym_hvclock_page) { 230 228 struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page(); 231 229 232 - if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK)) 230 + if (tsc_pg && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK)) 233 231 return vmf_insert_pfn(vma, vmf->address, 234 232 virt_to_phys(tsc_pg) >> PAGE_SHIFT); 235 233 } else if (sym_offset == image->sym_timens_page) { ··· 447 445 448 446 static int __init init_vdso(void) 449 447 { 448 + BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32); 449 + 450 450 init_vdso_image(&vdso_image_64); 451 451 452 452 #ifdef CONFIG_X86_X32_ABI
+12 -8
arch/x86/include/asm/clocksource.h
··· 4 4 #ifndef _ASM_X86_CLOCKSOURCE_H 5 5 #define _ASM_X86_CLOCKSOURCE_H 6 6 7 - #define VCLOCK_NONE 0 /* No vDSO clock available. */ 8 - #define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */ 9 - #define VCLOCK_PVCLOCK 2 /* vDSO should use vread_pvclock. */ 10 - #define VCLOCK_HVCLOCK 3 /* vDSO should use vread_hvclock. */ 11 - #define VCLOCK_MAX 3 7 + #include <asm/vdso/clocksource.h> 12 8 13 - struct arch_clocksource_data { 14 - int vclock_mode; 15 - }; 9 + extern unsigned int vclocks_used; 10 + 11 + static inline bool vclock_was_used(int vclock) 12 + { 13 + return READ_ONCE(vclocks_used) & (1U << vclock); 14 + } 15 + 16 + static inline void vclocks_set_used(unsigned int which) 17 + { 18 + WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << which)); 19 + } 16 20 17 21 #endif /* _ASM_X86_CLOCKSOURCE_H */
+3 -1
arch/x86/include/asm/mshyperv.h
··· 46 46 #define hv_set_reference_tsc(val) \ 47 47 wrmsrl(HV_X64_MSR_REFERENCE_TSC, val) 48 48 #define hv_set_clocksource_vdso(val) \ 49 - ((val).archdata.vclock_mode = VCLOCK_HVCLOCK) 49 + ((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK) 50 + #define hv_enable_vdso_clocksource() \ 51 + vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK); 50 52 #define hv_get_raw_timer() rdtsc_ordered() 51 53 52 54 void hyperv_callback_vector(void);
+1 -11
arch/x86/include/asm/processor.h
··· 26 26 #include <asm/fpu/types.h> 27 27 #include <asm/unwind_hints.h> 28 28 #include <asm/vmxfeatures.h> 29 + #include <asm/vdso/processor.h> 29 30 30 31 #include <linux/personality.h> 31 32 #include <linux/cache.h> ··· 676 675 cpuid(op, &eax, &ebx, &ecx, &edx); 677 676 678 677 return edx; 679 - } 680 - 681 - /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ 682 - static __always_inline void rep_nop(void) 683 - { 684 - asm volatile("rep; nop" ::: "memory"); 685 - } 686 - 687 - static __always_inline void cpu_relax(void) 688 - { 689 - rep_nop(); 690 678 } 691 679 692 680 /*
+10
arch/x86/include/asm/vdso/clocksource.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __ASM_VDSO_CLOCKSOURCE_H 3 + #define __ASM_VDSO_CLOCKSOURCE_H 4 + 5 + #define VDSO_ARCH_CLOCKMODES \ 6 + VDSO_CLOCKMODE_TSC, \ 7 + VDSO_CLOCKMODE_PVCLOCK, \ 8 + VDSO_CLOCKMODE_HVCLOCK 9 + 10 + #endif /* __ASM_VDSO_CLOCKSOURCE_H */
+3 -3
arch/x86/include/asm/vdso/gettimeofday.h
··· 243 243 244 244 static inline u64 __arch_get_hw_counter(s32 clock_mode) 245 245 { 246 - if (clock_mode == VCLOCK_TSC) 246 + if (likely(clock_mode == VDSO_CLOCKMODE_TSC)) 247 247 return (u64)rdtsc_ordered(); 248 248 /* 249 249 * For any memory-mapped vclock type, we need to make sure that gcc ··· 252 252 * question isn't enabled, which will segfault. Hence the barriers. 253 253 */ 254 254 #ifdef CONFIG_PARAVIRT_CLOCK 255 - if (clock_mode == VCLOCK_PVCLOCK) { 255 + if (clock_mode == VDSO_CLOCKMODE_PVCLOCK) { 256 256 barrier(); 257 257 return vread_pvclock(); 258 258 } 259 259 #endif 260 260 #ifdef CONFIG_HYPERV_TIMER 261 - if (clock_mode == VCLOCK_HVCLOCK) { 261 + if (clock_mode == VDSO_CLOCKMODE_HVCLOCK) { 262 262 barrier(); 263 263 return vread_hvclock(); 264 264 }
+23
arch/x86/include/asm/vdso/processor.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2020 ARM Ltd. 4 + */ 5 + #ifndef __ASM_VDSO_PROCESSOR_H 6 + #define __ASM_VDSO_PROCESSOR_H 7 + 8 + #ifndef __ASSEMBLY__ 9 + 10 + /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ 11 + static __always_inline void rep_nop(void) 12 + { 13 + asm volatile("rep; nop" ::: "memory"); 14 + } 15 + 16 + static __always_inline void cpu_relax(void) 17 + { 18 + rep_nop(); 19 + } 20 + 21 + #endif /* __ASSEMBLY__ */ 22 + 23 + #endif /* __ASM_VDSO_PROCESSOR_H */
-15
arch/x86/include/asm/vdso/vsyscall.h
··· 10 10 #include <asm/vgtod.h> 11 11 #include <asm/vvar.h> 12 12 13 - int vclocks_used __read_mostly; 14 - 15 13 DEFINE_VVAR(struct vdso_data, _vdso_data); 16 14 /* 17 15 * Update the vDSO data page to keep in sync with kernel timekeeping. ··· 20 22 return _vdso_data; 21 23 } 22 24 #define __arch_get_k_vdso_data __x86_get_k_vdso_data 23 - 24 - static __always_inline 25 - int __x86_get_clock_mode(struct timekeeper *tk) 26 - { 27 - int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode; 28 - 29 - /* Mark the new vclock used. */ 30 - BUILD_BUG_ON(VCLOCK_MAX >= 32); 31 - WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << vclock_mode)); 32 - 33 - return vclock_mode; 34 - } 35 - #define __arch_get_clock_mode __x86_get_clock_mode 36 25 37 26 /* The asm-generic header needs to be included after the definitions above */ 38 27 #include <asm-generic/vdso/vsyscall.h>
+6 -6
arch/x86/include/asm/vgtod.h
··· 2 2 #ifndef _ASM_X86_VGTOD_H 3 3 #define _ASM_X86_VGTOD_H 4 4 5 + /* 6 + * This check is required to prevent ARCH=um to include 7 + * unwanted headers. 8 + */ 9 + #ifdef CONFIG_GENERIC_GETTIMEOFDAY 5 10 #include <linux/compiler.h> 6 11 #include <asm/clocksource.h> 7 12 #include <vdso/datapage.h> ··· 19 14 #else 20 15 typedef unsigned long gtod_long_t; 21 16 #endif 22 - 23 - extern int vclocks_used; 24 - static inline bool vclock_was_used(int vclock) 25 - { 26 - return READ_ONCE(vclocks_used) & (1 << vclock); 27 - } 17 + #endif /* CONFIG_GENERIC_GETTIMEOFDAY */ 28 18 29 19 #endif /* _ASM_X86_VGTOD_H */
+8 -1
arch/x86/kernel/kvmclock.c
··· 159 159 return ret; 160 160 } 161 161 162 + static int kvm_cs_enable(struct clocksource *cs) 163 + { 164 + vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK); 165 + return 0; 166 + } 167 + 162 168 struct clocksource kvm_clock = { 163 169 .name = "kvm-clock", 164 170 .read = kvm_clock_get_cycles, 165 171 .rating = 400, 166 172 .mask = CLOCKSOURCE_MASK(64), 167 173 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 174 + .enable = kvm_cs_enable, 168 175 }; 169 176 EXPORT_SYMBOL_GPL(kvm_clock); 170 177 ··· 279 272 if (!(flags & PVCLOCK_TSC_STABLE_BIT)) 280 273 return 0; 281 274 282 - kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK; 275 + kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK; 283 276 #endif 284 277 285 278 kvmclock_init_mem();
+1 -1
arch/x86/kernel/pvclock.c
··· 145 145 146 146 void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti) 147 147 { 148 - WARN_ON(vclock_was_used(VCLOCK_PVCLOCK)); 148 + WARN_ON(vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)); 149 149 pvti_cpu0_va = pvti; 150 150 } 151 151
+3 -9
arch/x86/kernel/time.c
··· 122 122 */ 123 123 void clocksource_arch_init(struct clocksource *cs) 124 124 { 125 - if (cs->archdata.vclock_mode == VCLOCK_NONE) 125 + if (cs->vdso_clock_mode == VDSO_CLOCKMODE_NONE) 126 126 return; 127 127 128 - if (cs->archdata.vclock_mode > VCLOCK_MAX) { 129 - pr_warn("clocksource %s registered with invalid vclock_mode %d. Disabling vclock.\n", 130 - cs->name, cs->archdata.vclock_mode); 131 - cs->archdata.vclock_mode = VCLOCK_NONE; 132 - } 133 - 134 128 if (cs->mask != CLOCKSOURCE_MASK(64)) { 135 - pr_warn("clocksource %s registered with invalid mask %016llx. Disabling vclock.\n", 129 + pr_warn("clocksource %s registered with invalid mask %016llx for VDSO. Disabling VDSO support.\n", 136 130 cs->name, cs->mask); 137 - cs->archdata.vclock_mode = VCLOCK_NONE; 131 + cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE; 138 132 } 139 133 }
+20 -12
arch/x86/kernel/tsc.c
··· 1108 1108 sched_clock_tick_stable(); 1109 1109 } 1110 1110 1111 + static int tsc_cs_enable(struct clocksource *cs) 1112 + { 1113 + vclocks_set_used(VDSO_CLOCKMODE_TSC); 1114 + return 0; 1115 + } 1116 + 1111 1117 /* 1112 1118 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc() 1113 1119 */ 1114 1120 static struct clocksource clocksource_tsc_early = { 1115 - .name = "tsc-early", 1116 - .rating = 299, 1117 - .read = read_tsc, 1118 - .mask = CLOCKSOURCE_MASK(64), 1119 - .flags = CLOCK_SOURCE_IS_CONTINUOUS | 1121 + .name = "tsc-early", 1122 + .rating = 299, 1123 + .read = read_tsc, 1124 + .mask = CLOCKSOURCE_MASK(64), 1125 + .flags = CLOCK_SOURCE_IS_CONTINUOUS | 1120 1126 CLOCK_SOURCE_MUST_VERIFY, 1121 - .archdata = { .vclock_mode = VCLOCK_TSC }, 1127 + .vdso_clock_mode = VDSO_CLOCKMODE_TSC, 1128 + .enable = tsc_cs_enable, 1122 1129 .resume = tsc_resume, 1123 1130 .mark_unstable = tsc_cs_mark_unstable, 1124 1131 .tick_stable = tsc_cs_tick_stable, ··· 1138 1131 * been found good. 1139 1132 */ 1140 1133 static struct clocksource clocksource_tsc = { 1141 - .name = "tsc", 1142 - .rating = 300, 1143 - .read = read_tsc, 1144 - .mask = CLOCKSOURCE_MASK(64), 1145 - .flags = CLOCK_SOURCE_IS_CONTINUOUS | 1134 + .name = "tsc", 1135 + .rating = 300, 1136 + .read = read_tsc, 1137 + .mask = CLOCKSOURCE_MASK(64), 1138 + .flags = CLOCK_SOURCE_IS_CONTINUOUS | 1146 1139 CLOCK_SOURCE_VALID_FOR_HRES | 1147 1140 CLOCK_SOURCE_MUST_VERIFY, 1148 - .archdata = { .vclock_mode = VCLOCK_TSC }, 1141 + .vdso_clock_mode = VDSO_CLOCKMODE_TSC, 1142 + .enable = tsc_cs_enable, 1149 1143 .resume = tsc_resume, 1150 1144 .mark_unstable = tsc_cs_mark_unstable, 1151 1145 .tick_stable = tsc_cs_tick_stable,
+2 -2
arch/x86/kvm/trace.h
··· 815 815 #ifdef CONFIG_X86_64 816 816 817 817 #define host_clocks \ 818 - {VCLOCK_NONE, "none"}, \ 819 - {VCLOCK_TSC, "tsc"} \ 818 + {VDSO_CLOCKMODE_NONE, "none"}, \ 819 + {VDSO_CLOCKMODE_TSC, "tsc"} \ 820 820 821 821 TRACE_EVENT(kvm_update_master_clock, 822 822 TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
+11 -11
arch/x86/kvm/x86.c
··· 1634 1634 write_seqcount_begin(&vdata->seq); 1635 1635 1636 1636 /* copy pvclock gtod data */ 1637 - vdata->clock.vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode; 1637 + vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; 1638 1638 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; 1639 1639 vdata->clock.mask = tk->tkr_mono.mask; 1640 1640 vdata->clock.mult = tk->tkr_mono.mult; ··· 1642 1642 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; 1643 1643 vdata->clock.offset = tk->tkr_mono.base; 1644 1644 1645 - vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->archdata.vclock_mode; 1645 + vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; 1646 1646 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; 1647 1647 vdata->raw_clock.mask = tk->tkr_raw.mask; 1648 1648 vdata->raw_clock.mult = tk->tkr_raw.mult; ··· 1843 1843 1844 1844 static inline int gtod_is_based_on_tsc(int mode) 1845 1845 { 1846 - return mode == VCLOCK_TSC || mode == VCLOCK_HVCLOCK; 1846 + return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK; 1847 1847 } 1848 1848 1849 1849 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) ··· 1936 1936 * TSC is marked unstable when we're running on Hyper-V, 1937 1937 * 'TSC page' clocksource is good. 
1938 1938 */ 1939 - if (pvclock_gtod_data.clock.vclock_mode == VCLOCK_HVCLOCK) 1939 + if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK) 1940 1940 return false; 1941 1941 #endif 1942 1942 return check_tsc_unstable(); ··· 2091 2091 u64 tsc_pg_val; 2092 2092 2093 2093 switch (clock->vclock_mode) { 2094 - case VCLOCK_HVCLOCK: 2094 + case VDSO_CLOCKMODE_HVCLOCK: 2095 2095 tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(), 2096 2096 tsc_timestamp); 2097 2097 if (tsc_pg_val != U64_MAX) { 2098 2098 /* TSC page valid */ 2099 - *mode = VCLOCK_HVCLOCK; 2099 + *mode = VDSO_CLOCKMODE_HVCLOCK; 2100 2100 v = (tsc_pg_val - clock->cycle_last) & 2101 2101 clock->mask; 2102 2102 } else { 2103 2103 /* TSC page invalid */ 2104 - *mode = VCLOCK_NONE; 2104 + *mode = VDSO_CLOCKMODE_NONE; 2105 2105 } 2106 2106 break; 2107 - case VCLOCK_TSC: 2108 - *mode = VCLOCK_TSC; 2107 + case VDSO_CLOCKMODE_TSC: 2108 + *mode = VDSO_CLOCKMODE_TSC; 2109 2109 *tsc_timestamp = read_tsc(); 2110 2110 v = (*tsc_timestamp - clock->cycle_last) & 2111 2111 clock->mask; 2112 2112 break; 2113 2113 default: 2114 - *mode = VCLOCK_NONE; 2114 + *mode = VDSO_CLOCKMODE_NONE; 2115 2115 } 2116 2116 2117 - if (*mode == VCLOCK_NONE) 2117 + if (*mode == VDSO_CLOCKMODE_NONE) 2118 2118 *tsc_timestamp = v = 0; 2119 2119 2120 2120 return v * clock->mult;
+22 -14
arch/x86/xen/time.c
··· 145 145 .notifier_call = xen_pvclock_gtod_notify, 146 146 }; 147 147 148 + static int xen_cs_enable(struct clocksource *cs) 149 + { 150 + vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK); 151 + return 0; 152 + } 153 + 148 154 static struct clocksource xen_clocksource __read_mostly = { 149 - .name = "xen", 150 - .rating = 400, 151 - .read = xen_clocksource_get_cycles, 152 - .mask = ~0, 153 - .flags = CLOCK_SOURCE_IS_CONTINUOUS, 155 + .name = "xen", 156 + .rating = 400, 157 + .read = xen_clocksource_get_cycles, 158 + .mask = CLOCKSOURCE_MASK(64), 159 + .flags = CLOCK_SOURCE_IS_CONTINUOUS, 160 + .enable = xen_cs_enable, 154 161 }; 155 162 156 163 /* ··· 419 412 ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t); 420 413 421 414 /* 422 - * We don't disable VCLOCK_PVCLOCK entirely if it fails to register the 423 - * secondary time info with Xen or if we migrated to a host without the 424 - * necessary flags. On both of these cases what happens is either 425 - * process seeing a zeroed out pvti or seeing no PVCLOCK_TSC_STABLE_BIT 426 - * bit set. Userspace checks the latter and if 0, it discards the data 427 - * in pvti and fallbacks to a system call for a reliable timestamp. 415 + * We don't disable VDSO_CLOCKMODE_PVCLOCK entirely if it fails to 416 + * register the secondary time info with Xen or if we migrated to a 417 + * host without the necessary flags. On both of these cases what 418 + * happens is either process seeing a zeroed out pvti or seeing no 419 + * PVCLOCK_TSC_STABLE_BIT bit set. Userspace checks the latter and 420 + * if 0, it discards the data in pvti and fallbacks to a system 421 + * call for a reliable timestamp. 
428 422 */ 429 423 if (ret != 0) 430 424 pr_notice("Cannot restore secondary vcpu_time_info (err %d)", ··· 451 443 452 444 ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t); 453 445 if (ret) { 454 - pr_notice("xen: VCLOCK_PVCLOCK not supported (err %d)\n", ret); 446 + pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (err %d)\n", ret); 455 447 free_page((unsigned long)ti); 456 448 return; 457 449 } ··· 468 460 if (!ret) 469 461 free_page((unsigned long)ti); 470 462 471 - pr_notice("xen: VCLOCK_PVCLOCK not supported (tsc unstable)\n"); 463 + pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (tsc unstable)\n"); 472 464 return; 473 465 } 474 466 475 467 xen_clock = ti; 476 468 pvclock_set_pvti_cpu0_va(xen_clock); 477 469 478 - xen_clocksource.archdata.vclock_mode = VCLOCK_PVCLOCK; 470 + xen_clocksource.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK; 479 471 } 480 472 481 473 static void __init xen_time_init(void)
+8
drivers/clocksource/Kconfig
··· 697 697 help 698 698 Support for the timer/counter unit of the Ingenic JZ SoCs. 699 699 700 + config INGENIC_OST 701 + bool "Clocksource for Ingenic OS Timer" 702 + depends on MIPS || COMPILE_TEST 703 + depends on COMMON_CLK 704 + select MFD_SYSCON 705 + help 706 + Support for the Operating System Timer of the Ingenic JZ SoCs. 707 + 700 708 config MICROCHIP_PIT64B 701 709 bool "Microchip PIT64B support" 702 710 depends on OF || COMPILE_TEST
+1
drivers/clocksource/Makefile
··· 80 80 obj-$(CONFIG_H8300_TMR8) += h8300_timer8.o 81 81 obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o 82 82 obj-$(CONFIG_H8300_TPU) += h8300_tpu.o 83 + obj-$(CONFIG_INGENIC_OST) += ingenic-ost.o 83 84 obj-$(CONFIG_INGENIC_TIMER) += ingenic-timer.o 84 85 obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o 85 86 obj-$(CONFIG_X86_NUMACHIP) += numachip.o
+8 -4
drivers/clocksource/arm_arch_timer.c
··· 69 69 static bool arch_timer_c3stop; 70 70 static bool arch_timer_mem_use_virtual; 71 71 static bool arch_counter_suspend_stop; 72 - static bool vdso_default = true; 72 + #ifdef CONFIG_GENERIC_GETTIMEOFDAY 73 + static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER; 74 + #else 75 + static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE; 76 + #endif /* CONFIG_GENERIC_GETTIMEOFDAY */ 73 77 74 78 static cpumask_t evtstrm_available = CPU_MASK_NONE; 75 79 static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); ··· 564 560 * change both the default value and the vdso itself. 565 561 */ 566 562 if (wa->read_cntvct_el0) { 567 - clocksource_counter.archdata.vdso_direct = false; 568 - vdso_default = false; 563 + clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE; 564 + vdso_default = VDSO_CLOCKMODE_NONE; 569 565 } 570 566 } 571 567 ··· 983 979 } 984 980 985 981 arch_timer_read_counter = rd; 986 - clocksource_counter.archdata.vdso_direct = vdso_default; 982 + clocksource_counter.vdso_clock_mode = vdso_default; 987 983 } else { 988 984 arch_timer_read_counter = arch_counter_get_cntvct_mem; 989 985 }
+2 -6
drivers/clocksource/bcm2835_timer.c
··· 31 31 void __iomem *compare; 32 32 int match_mask; 33 33 struct clock_event_device evt; 34 - struct irqaction act; 35 34 }; 36 35 37 36 static void __iomem *system_clock __read_mostly; ··· 112 113 timer->evt.features = CLOCK_EVT_FEAT_ONESHOT; 113 114 timer->evt.set_next_event = bcm2835_time_set_next_event; 114 115 timer->evt.cpumask = cpumask_of(0); 115 - timer->act.name = node->name; 116 - timer->act.flags = IRQF_TIMER | IRQF_SHARED; 117 - timer->act.dev_id = timer; 118 - timer->act.handler = bcm2835_time_interrupt; 119 116 120 - ret = setup_irq(irq, &timer->act); 117 + ret = request_irq(irq, bcm2835_time_interrupt, IRQF_TIMER | IRQF_SHARED, 118 + node->name, timer); 121 119 if (ret) { 122 120 pr_err("Can't set up timer IRQ\n"); 123 121 goto err_timer_free;
+3 -7
drivers/clocksource/bcm_kona_timer.c
··· 160 160 return IRQ_HANDLED; 161 161 } 162 162 163 - static struct irqaction kona_timer_irq = { 164 - .name = "Kona Timer Tick", 165 - .flags = IRQF_TIMER, 166 - .handler = kona_timer_interrupt, 167 - }; 168 - 169 163 static int __init kona_timer_init(struct device_node *node) 170 164 { 171 165 u32 freq; ··· 186 192 kona_timer_disable_and_clear(timers.tmr_regs); 187 193 188 194 kona_timer_clockevents_init(); 189 - setup_irq(timers.tmr_irq, &kona_timer_irq); 195 + if (request_irq(timers.tmr_irq, kona_timer_interrupt, IRQF_TIMER, 196 + "Kona Timer Tick", NULL)) 197 + pr_err("%s: request_irq() failed\n", "Kona Timer Tick"); 190 198 kona_timer_set_next_event((arch_timer_rate / HZ), NULL); 191 199 192 200 return 0;
+3 -8
drivers/clocksource/dw_apb_timer.c
··· 270 270 dw_ced->ced.rating = rating; 271 271 dw_ced->ced.name = name; 272 272 273 - dw_ced->irqaction.name = dw_ced->ced.name; 274 - dw_ced->irqaction.handler = dw_apb_clockevent_irq; 275 - dw_ced->irqaction.dev_id = &dw_ced->ced; 276 - dw_ced->irqaction.irq = irq; 277 - dw_ced->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | 278 - IRQF_NOBALANCING; 279 - 280 273 dw_ced->eoi = apbt_eoi; 281 - err = setup_irq(irq, &dw_ced->irqaction); 274 + err = request_irq(irq, dw_apb_clockevent_irq, 275 + IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, 276 + dw_ced->ced.name, &dw_ced->ced); 282 277 if (err) { 283 278 pr_err("failed to request timer irq\n"); 284 279 kfree(dw_ced);
+4 -8
drivers/clocksource/exynos_mct.c
··· 329 329 return IRQ_HANDLED; 330 330 } 331 331 332 - static struct irqaction mct_comp_event_irq = { 333 - .name = "mct_comp_irq", 334 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 335 - .handler = exynos4_mct_comp_isr, 336 - .dev_id = &mct_comp_device, 337 - }; 338 - 339 332 static int exynos4_clockevent_init(void) 340 333 { 341 334 mct_comp_device.cpumask = cpumask_of(0); 342 335 clockevents_config_and_register(&mct_comp_device, clk_rate, 343 336 0xf, 0xffffffff); 344 - setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq); 337 + if (request_irq(mct_irqs[MCT_G0_IRQ], exynos4_mct_comp_isr, 338 + IRQF_TIMER | IRQF_IRQPOLL, "mct_comp_irq", 339 + &mct_comp_device)) 340 + pr_err("%s: request_irq() failed\n", "mct_comp_irq"); 345 341 346 342 return 0; 347 343 }
+7
drivers/clocksource/hyperv_timer.c
··· 370 370 hv_set_reference_tsc(tsc_msr); 371 371 } 372 372 373 + static int hv_cs_enable(struct clocksource *cs) 374 + { 375 + hv_enable_vdso_clocksource(); 376 + return 0; 377 + } 378 + 373 379 static struct clocksource hyperv_cs_tsc = { 374 380 .name = "hyperv_clocksource_tsc_page", 375 381 .rating = 250, ··· 384 378 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 385 379 .suspend= suspend_hv_clock_tsc, 386 380 .resume = resume_hv_clock_tsc, 381 + .enable = hv_cs_enable, 387 382 }; 388 383 389 384 static u64 notrace read_hv_clock_msr(void)
+189
drivers/clocksource/ingenic-ost.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * JZ47xx SoCs TCU Operating System Timer driver 4 + * 5 + * Copyright (C) 2016 Maarten ter Huurne <maarten@treewalker.org> 6 + * Copyright (C) 2020 Paul Cercueil <paul@crapouillou.net> 7 + */ 8 + 9 + #include <linux/clk.h> 10 + #include <linux/clocksource.h> 11 + #include <linux/mfd/ingenic-tcu.h> 12 + #include <linux/mfd/syscon.h> 13 + #include <linux/of.h> 14 + #include <linux/platform_device.h> 15 + #include <linux/pm.h> 16 + #include <linux/regmap.h> 17 + #include <linux/sched_clock.h> 18 + 19 + #define TCU_OST_TCSR_MASK 0xffc0 20 + #define TCU_OST_TCSR_CNT_MD BIT(15) 21 + 22 + #define TCU_OST_CHANNEL 15 23 + 24 + /* 25 + * The TCU_REG_OST_CNT{L,R} from <linux/mfd/ingenic-tcu.h> are only for the 26 + * regmap; these are for use with the __iomem pointer. 27 + */ 28 + #define OST_REG_CNTL 0x4 29 + #define OST_REG_CNTH 0x8 30 + 31 + struct ingenic_ost_soc_info { 32 + bool is64bit; 33 + }; 34 + 35 + struct ingenic_ost { 36 + void __iomem *regs; 37 + struct clk *clk; 38 + 39 + struct clocksource cs; 40 + }; 41 + 42 + static struct ingenic_ost *ingenic_ost; 43 + 44 + static u64 notrace ingenic_ost_read_cntl(void) 45 + { 46 + /* Read using __iomem pointer instead of regmap to avoid locking */ 47 + return readl(ingenic_ost->regs + OST_REG_CNTL); 48 + } 49 + 50 + static u64 notrace ingenic_ost_read_cnth(void) 51 + { 52 + /* Read using __iomem pointer instead of regmap to avoid locking */ 53 + return readl(ingenic_ost->regs + OST_REG_CNTH); 54 + } 55 + 56 + static u64 notrace ingenic_ost_clocksource_readl(struct clocksource *cs) 57 + { 58 + return ingenic_ost_read_cntl(); 59 + } 60 + 61 + static u64 notrace ingenic_ost_clocksource_readh(struct clocksource *cs) 62 + { 63 + return ingenic_ost_read_cnth(); 64 + } 65 + 66 + static int __init ingenic_ost_probe(struct platform_device *pdev) 67 + { 68 + const struct ingenic_ost_soc_info *soc_info; 69 + struct device *dev = &pdev->dev; 70 + struct ingenic_ost *ost; 71 + 
struct clocksource *cs; 72 + struct regmap *map; 73 + unsigned long rate; 74 + int err; 75 + 76 + soc_info = device_get_match_data(dev); 77 + if (!soc_info) 78 + return -EINVAL; 79 + 80 + ost = devm_kzalloc(dev, sizeof(*ost), GFP_KERNEL); 81 + if (!ost) 82 + return -ENOMEM; 83 + 84 + ingenic_ost = ost; 85 + 86 + ost->regs = devm_platform_ioremap_resource(pdev, 0); 87 + if (IS_ERR(ost->regs)) 88 + return PTR_ERR(ost->regs); 89 + 90 + map = device_node_to_regmap(dev->parent->of_node); 91 + if (!map) { 92 + dev_err(dev, "regmap not found"); 93 + return -EINVAL; 94 + } 95 + 96 + ost->clk = devm_clk_get(dev, "ost"); 97 + if (IS_ERR(ost->clk)) 98 + return PTR_ERR(ost->clk); 99 + 100 + err = clk_prepare_enable(ost->clk); 101 + if (err) 102 + return err; 103 + 104 + /* Clear counter high/low registers */ 105 + if (soc_info->is64bit) 106 + regmap_write(map, TCU_REG_OST_CNTL, 0); 107 + regmap_write(map, TCU_REG_OST_CNTH, 0); 108 + 109 + /* Don't reset counter at compare value. */ 110 + regmap_update_bits(map, TCU_REG_OST_TCSR, 111 + TCU_OST_TCSR_MASK, TCU_OST_TCSR_CNT_MD); 112 + 113 + rate = clk_get_rate(ost->clk); 114 + 115 + /* Enable OST TCU channel */ 116 + regmap_write(map, TCU_REG_TESR, BIT(TCU_OST_CHANNEL)); 117 + 118 + cs = &ost->cs; 119 + cs->name = "ingenic-ost"; 120 + cs->rating = 320; 121 + cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; 122 + cs->mask = CLOCKSOURCE_MASK(32); 123 + 124 + if (soc_info->is64bit) 125 + cs->read = ingenic_ost_clocksource_readl; 126 + else 127 + cs->read = ingenic_ost_clocksource_readh; 128 + 129 + err = clocksource_register_hz(cs, rate); 130 + if (err) { 131 + dev_err(dev, "clocksource registration failed"); 132 + clk_disable_unprepare(ost->clk); 133 + return err; 134 + } 135 + 136 + if (soc_info->is64bit) 137 + sched_clock_register(ingenic_ost_read_cntl, 32, rate); 138 + else 139 + sched_clock_register(ingenic_ost_read_cnth, 32, rate); 140 + 141 + return 0; 142 + } 143 + 144 + static int __maybe_unused ingenic_ost_suspend(struct device 
*dev) 145 + { 146 + struct ingenic_ost *ost = dev_get_drvdata(dev); 147 + 148 + clk_disable(ost->clk); 149 + 150 + return 0; 151 + } 152 + 153 + static int __maybe_unused ingenic_ost_resume(struct device *dev) 154 + { 155 + struct ingenic_ost *ost = dev_get_drvdata(dev); 156 + 157 + return clk_enable(ost->clk); 158 + } 159 + 160 + static const struct dev_pm_ops __maybe_unused ingenic_ost_pm_ops = { 161 + /* _noirq: We want the OST clock to be gated last / ungated first */ 162 + .suspend_noirq = ingenic_ost_suspend, 163 + .resume_noirq = ingenic_ost_resume, 164 + }; 165 + 166 + static const struct ingenic_ost_soc_info jz4725b_ost_soc_info = { 167 + .is64bit = false, 168 + }; 169 + 170 + static const struct ingenic_ost_soc_info jz4770_ost_soc_info = { 171 + .is64bit = true, 172 + }; 173 + 174 + static const struct of_device_id ingenic_ost_of_match[] = { 175 + { .compatible = "ingenic,jz4725b-ost", .data = &jz4725b_ost_soc_info, }, 176 + { .compatible = "ingenic,jz4770-ost", .data = &jz4770_ost_soc_info, }, 177 + { } 178 + }; 179 + 180 + static struct platform_driver ingenic_ost_driver = { 181 + .driver = { 182 + .name = "ingenic-ost", 183 + #ifdef CONFIG_PM_SUSPEND 184 + .pm = &ingenic_ost_pm_ops, 185 + #endif 186 + .of_match_table = ingenic_ost_of_match, 187 + }, 188 + }; 189 + builtin_platform_driver_probe(ingenic_ost_driver, ingenic_ost_probe);
+2 -1
drivers/clocksource/ingenic-timer.c
··· 230 230 { .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, }, 231 231 { .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, }, 232 232 { .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, }, 233 + { .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, }, 233 234 { /* sentinel */ } 234 235 }; 235 236 ··· 303 302 TIMER_OF_DECLARE(jz4740_tcu_intc, "ingenic,jz4740-tcu", ingenic_tcu_init); 304 303 TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init); 305 304 TIMER_OF_DECLARE(jz4770_tcu_intc, "ingenic,jz4770-tcu", ingenic_tcu_init); 306 - 305 + TIMER_OF_DECLARE(x1000_tcu_intc, "ingenic,x1000-tcu", ingenic_tcu_init); 307 306 308 307 static int __init ingenic_tcu_probe(struct platform_device *pdev) 309 308 {
+4 -4
drivers/clocksource/mips-gic-timer.c
··· 155 155 } 156 156 157 157 static struct clocksource gic_clocksource = { 158 - .name = "GIC", 159 - .read = gic_hpt_read, 160 - .flags = CLOCK_SOURCE_IS_CONTINUOUS, 161 - .archdata = { .vdso_clock_mode = VDSO_CLOCK_GIC }, 158 + .name = "GIC", 159 + .read = gic_hpt_read, 160 + .flags = CLOCK_SOURCE_IS_CONTINUOUS, 161 + .vdso_clock_mode = VDSO_CLOCKMODE_GIC, 162 162 }; 163 163 164 164 static int __init __gic_clocksource_init(void)
+2 -8
drivers/clocksource/mxs_timer.c
··· 117 117 return IRQ_HANDLED; 118 118 } 119 119 120 - static struct irqaction mxs_timer_irq = { 121 - .name = "MXS Timer Tick", 122 - .dev_id = &mxs_clockevent_device, 123 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 124 - .handler = mxs_timer_interrupt, 125 - }; 126 - 127 120 static void mxs_irq_clear(char *state) 128 121 { 129 122 /* Disable interrupt in timer module */ ··· 270 277 if (irq <= 0) 271 278 return -EINVAL; 272 279 273 - return setup_irq(irq, &mxs_timer_irq); 280 + return request_irq(irq, mxs_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, 281 + "MXS Timer Tick", &mxs_clockevent_device); 274 282 } 275 283 TIMER_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
+3 -8
drivers/clocksource/nomadik-mtu.c
··· 181 181 return IRQ_HANDLED; 182 182 } 183 183 184 - static struct irqaction nmdk_timer_irq = { 185 - .name = "Nomadik Timer Tick", 186 - .flags = IRQF_TIMER, 187 - .handler = nmdk_timer_interrupt, 188 - .dev_id = &nmdk_clkevt, 189 - }; 190 - 191 184 static int __init nmdk_timer_init(void __iomem *base, int irq, 192 185 struct clk *pclk, struct clk *clk) 193 186 { ··· 225 232 sched_clock_register(nomadik_read_sched_clock, 32, rate); 226 233 227 234 /* Timer 1 is used for events, register irq and clockevents */ 228 - setup_irq(irq, &nmdk_timer_irq); 235 + if (request_irq(irq, nmdk_timer_interrupt, IRQF_TIMER, 236 + "Nomadik Timer Tick", &nmdk_clkevt)) 237 + pr_err("%s: request_irq() failed\n", "Nomadik Timer Tick"); 229 238 nmdk_clkevt.cpumask = cpumask_of(0); 230 239 nmdk_clkevt.irq = irq; 231 240 clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
+4 -8
drivers/clocksource/samsung_pwm_timer.c
··· 256 256 return IRQ_HANDLED; 257 257 } 258 258 259 - static struct irqaction samsung_clock_event_irq = { 260 - .name = "samsung_time_irq", 261 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 262 - .handler = samsung_clock_event_isr, 263 - .dev_id = &time_event_device, 264 - }; 265 - 266 259 static void __init samsung_clockevent_init(void) 267 260 { 268 261 unsigned long pclk; ··· 275 282 clock_rate, 1, pwm.tcnt_max); 276 283 277 284 irq_number = pwm.irq[pwm.event_id]; 278 - setup_irq(irq_number, &samsung_clock_event_irq); 285 + if (request_irq(irq_number, samsung_clock_event_isr, 286 + IRQF_TIMER | IRQF_IRQPOLL, "samsung_time_irq", 287 + &time_event_device)) 288 + pr_err("%s: request_irq() failed\n", "samsung_time_irq"); 279 289 280 290 if (pwm.variant.has_tint_cstat) { 281 291 u32 mask = (1 << pwm.event_id);
+22 -26
drivers/clocksource/timer-atlas7.c
··· 159 159 .resume = sirfsoc_clocksource_resume, 160 160 }; 161 161 162 - static struct irqaction sirfsoc_timer_irq = { 163 - .name = "sirfsoc_timer0", 164 - .flags = IRQF_TIMER | IRQF_NOBALANCING, 165 - .handler = sirfsoc_timer_interrupt, 166 - }; 167 - 168 - static struct irqaction sirfsoc_timer1_irq = { 169 - .name = "sirfsoc_timer1", 170 - .flags = IRQF_TIMER | IRQF_NOBALANCING, 171 - .handler = sirfsoc_timer_interrupt, 172 - }; 162 + static unsigned int sirfsoc_timer_irq, sirfsoc_timer1_irq; 173 163 174 164 static int sirfsoc_local_timer_starting_cpu(unsigned int cpu) 175 165 { 176 166 struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu); 177 - struct irqaction *action; 167 + unsigned int irq; 168 + const char *name; 178 169 179 - if (cpu == 0) 180 - action = &sirfsoc_timer_irq; 181 - else 182 - action = &sirfsoc_timer1_irq; 170 + if (cpu == 0) { 171 + irq = sirfsoc_timer_irq; 172 + name = "sirfsoc_timer0"; 173 + } else { 174 + irq = sirfsoc_timer1_irq; 175 + name = "sirfsoc_timer1"; 176 + } 183 177 184 - ce->irq = action->irq; 178 + ce->irq = irq; 185 179 ce->name = "local_timer"; 186 180 ce->features = CLOCK_EVT_FEAT_ONESHOT; 187 181 ce->rating = 200; ··· 190 196 ce->min_delta_ticks = 2; 191 197 ce->cpumask = cpumask_of(cpu); 192 198 193 - action->dev_id = ce; 194 - BUG_ON(setup_irq(ce->irq, action)); 195 - irq_force_affinity(action->irq, cpumask_of(cpu)); 199 + BUG_ON(request_irq(ce->irq, sirfsoc_timer_interrupt, 200 + IRQF_TIMER | IRQF_NOBALANCING, name, ce)); 201 + irq_force_affinity(ce->irq, cpumask_of(cpu)); 196 202 197 203 clockevents_register_device(ce); 198 204 return 0; ··· 200 206 201 207 static int sirfsoc_local_timer_dying_cpu(unsigned int cpu) 202 208 { 209 + struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu); 210 + 203 211 sirfsoc_timer_count_disable(1); 204 212 205 213 if (cpu == 0) 206 - remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); 214 + free_irq(sirfsoc_timer_irq, ce); 207 215 else 208 - 
remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); 216 + free_irq(sirfsoc_timer1_irq, ce); 209 217 return 0; 210 218 } 211 219 ··· 264 268 return -ENXIO; 265 269 } 266 270 267 - sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0); 268 - if (!sirfsoc_timer_irq.irq) { 271 + sirfsoc_timer_irq = irq_of_parse_and_map(np, 0); 272 + if (!sirfsoc_timer_irq) { 269 273 pr_err("No irq passed for timer0 via DT\n"); 270 274 return -EINVAL; 271 275 } 272 276 273 - sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1); 274 - if (!sirfsoc_timer1_irq.irq) { 277 + sirfsoc_timer1_irq = irq_of_parse_and_map(np, 1); 278 + if (!sirfsoc_timer1_irq) { 275 279 pr_err("No irq passed for timer1 via DT\n"); 276 280 return -EINVAL; 277 281 }
+2 -7
drivers/clocksource/timer-cs5535.c
··· 131 131 return IRQ_HANDLED; 132 132 } 133 133 134 - static struct irqaction mfgptirq = { 135 - .handler = mfgpt_tick, 136 - .flags = IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED, 137 - .name = DRV_NAME, 138 - }; 139 - 140 134 static int __init cs5535_mfgpt_init(void) 141 135 { 136 + unsigned long flags = IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED; 142 137 struct cs5535_mfgpt_timer *timer; 143 138 int ret; 144 139 uint16_t val; ··· 153 158 } 154 159 155 160 /* And register it with the kernel */ 156 - ret = setup_irq(timer_irq, &mfgptirq); 161 + ret = request_irq(timer_irq, mfgpt_tick, flags, DRV_NAME, timer); 157 162 if (ret) { 158 163 printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n"); 159 164 goto err_irq;
+2 -8
drivers/clocksource/timer-efm32.c
··· 119 119 }, 120 120 }; 121 121 122 - static struct irqaction efm32_clock_event_irq = { 123 - .name = "efm32 clockevent", 124 - .flags = IRQF_TIMER, 125 - .handler = efm32_clock_event_handler, 126 - .dev_id = &clock_event_ddata, 127 - }; 128 - 129 122 static int __init efm32_clocksource_init(struct device_node *np) 130 123 { 131 124 struct clk *clk; ··· 223 230 DIV_ROUND_CLOSEST(rate, 1024), 224 231 0xf, 0xffff); 225 232 226 - ret = setup_irq(irq, &efm32_clock_event_irq); 233 + ret = request_irq(irq, efm32_clock_event_handler, IRQF_TIMER, 234 + "efm32 clockevent", &clock_event_ddata); 227 235 if (ret) { 228 236 pr_err("Failed setup irq\n"); 229 237 goto err_setup_irq;
+2 -8
drivers/clocksource/timer-fsl-ftm.c
··· 176 176 .rating = 300, 177 177 }; 178 178 179 - static struct irqaction ftm_timer_irq = { 180 - .name = "Freescale ftm timer", 181 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 182 - .handler = ftm_evt_interrupt, 183 - .dev_id = &ftm_clockevent, 184 - }; 185 - 186 179 static int __init ftm_clockevent_init(unsigned long freq, int irq) 187 180 { 188 181 int err; ··· 185 192 186 193 ftm_reset_counter(priv->clkevt_base); 187 194 188 - err = setup_irq(irq, &ftm_timer_irq); 195 + err = request_irq(irq, ftm_evt_interrupt, IRQF_TIMER | IRQF_IRQPOLL, 196 + "Freescale ftm timer", &ftm_clockevent); 189 197 if (err) { 190 198 pr_err("ftm: setup irq failed: %d\n", err); 191 199 return err;
+53 -15
drivers/clocksource/timer-fttmr010.c
··· 38 38 #define TIMER_CR (0x30) 39 39 40 40 /* 41 + * Control register set to clear for ast2600 only. 42 + */ 43 + #define AST2600_TIMER_CR_CLR (0x3c) 44 + 45 + /* 41 46 * Control register (TMC30) bit fields for fttmr010/gemini/moxart timers. 42 47 */ 43 48 #define TIMER_1_CR_ENABLE BIT(0) ··· 102 97 bool is_aspeed; 103 98 u32 t1_enable_val; 104 99 struct clock_event_device clkevt; 100 + int (*timer_shutdown)(struct clock_event_device *evt); 105 101 #ifdef CONFIG_ARM 106 102 struct delay_timer delay_timer; 107 103 #endif ··· 146 140 u32 cr; 147 141 148 142 /* Stop */ 149 - cr = readl(fttmr010->base + TIMER_CR); 150 - cr &= ~fttmr010->t1_enable_val; 151 - writel(cr, fttmr010->base + TIMER_CR); 143 + fttmr010->timer_shutdown(evt); 152 144 153 145 if (fttmr010->is_aspeed) { 154 146 /* ··· 164 160 cr = readl(fttmr010->base + TIMER_CR); 165 161 cr |= fttmr010->t1_enable_val; 166 162 writel(cr, fttmr010->base + TIMER_CR); 163 + 164 + return 0; 165 + } 166 + 167 + static int ast2600_timer_shutdown(struct clock_event_device *evt) 168 + { 169 + struct fttmr010 *fttmr010 = to_fttmr010(evt); 170 + 171 + /* Stop */ 172 + writel(fttmr010->t1_enable_val, fttmr010->base + AST2600_TIMER_CR_CLR); 167 173 168 174 return 0; 169 175 } ··· 197 183 u32 cr; 198 184 199 185 /* Stop */ 200 - cr = readl(fttmr010->base + TIMER_CR); 201 - cr &= ~fttmr010->t1_enable_val; 202 - writel(cr, fttmr010->base + TIMER_CR); 186 + fttmr010->timer_shutdown(evt); 203 187 204 188 /* Setup counter start from 0 or ~0 */ 205 189 writel(0, fttmr010->base + TIMER1_COUNT); ··· 223 211 u32 cr; 224 212 225 213 /* Stop */ 226 - cr = readl(fttmr010->base + TIMER_CR); 227 - cr &= ~fttmr010->t1_enable_val; 228 - writel(cr, fttmr010->base + TIMER_CR); 214 + fttmr010->timer_shutdown(evt); 229 215 230 216 /* Setup timer to fire at 1/HZ intervals. 
*/ 231 217 if (fttmr010->is_aspeed) { ··· 259 249 return IRQ_HANDLED; 260 250 } 261 251 262 - static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed) 252 + static irqreturn_t ast2600_timer_interrupt(int irq, void *dev_id) 253 + { 254 + struct clock_event_device *evt = dev_id; 255 + struct fttmr010 *fttmr010 = to_fttmr010(evt); 256 + 257 + writel(0x1, fttmr010->base + TIMER_INTR_STATE); 258 + 259 + evt->event_handler(evt); 260 + return IRQ_HANDLED; 261 + } 262 + 263 + static int __init fttmr010_common_init(struct device_node *np, 264 + bool is_aspeed, 265 + int (*timer_shutdown)(struct clock_event_device *), 266 + irq_handler_t irq_handler) 263 267 { 264 268 struct fttmr010 *fttmr010; 265 269 int irq; ··· 374 350 fttmr010->tick_rate); 375 351 } 376 352 353 + fttmr010->timer_shutdown = timer_shutdown; 354 + 377 355 /* 378 356 * Setup clockevent timer (interrupt-driven) on timer 1. 379 357 */ ··· 383 357 writel(0, fttmr010->base + TIMER1_LOAD); 384 358 writel(0, fttmr010->base + TIMER1_MATCH1); 385 359 writel(0, fttmr010->base + TIMER1_MATCH2); 386 - ret = request_irq(irq, fttmr010_timer_interrupt, IRQF_TIMER, 360 + ret = request_irq(irq, irq_handler, IRQF_TIMER, 387 361 "FTTMR010-TIMER1", &fttmr010->clkevt); 388 362 if (ret) { 389 363 pr_err("FTTMR010-TIMER1 no IRQ\n"); ··· 396 370 fttmr010->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | 397 371 CLOCK_EVT_FEAT_ONESHOT; 398 372 fttmr010->clkevt.set_next_event = fttmr010_timer_set_next_event; 399 - fttmr010->clkevt.set_state_shutdown = fttmr010_timer_shutdown; 373 + fttmr010->clkevt.set_state_shutdown = fttmr010->timer_shutdown; 400 374 fttmr010->clkevt.set_state_periodic = fttmr010_timer_set_periodic; 401 375 fttmr010->clkevt.set_state_oneshot = fttmr010_timer_set_oneshot; 402 - fttmr010->clkevt.tick_resume = fttmr010_timer_shutdown; 376 + fttmr010->clkevt.tick_resume = fttmr010->timer_shutdown; 403 377 fttmr010->clkevt.cpumask = cpumask_of(0); 404 378 fttmr010->clkevt.irq = irq; 405 379 
clockevents_config_and_register(&fttmr010->clkevt, ··· 430 404 return ret; 431 405 } 432 406 407 + static __init int ast2600_timer_init(struct device_node *np) 408 + { 409 + return fttmr010_common_init(np, true, 410 + ast2600_timer_shutdown, 411 + ast2600_timer_interrupt); 412 + } 413 + 433 414 static __init int aspeed_timer_init(struct device_node *np) 434 415 { 435 - return fttmr010_common_init(np, true); 416 + return fttmr010_common_init(np, true, 417 + fttmr010_timer_shutdown, 418 + fttmr010_timer_interrupt); 436 419 } 437 420 438 421 static __init int fttmr010_timer_init(struct device_node *np) 439 422 { 440 - return fttmr010_common_init(np, false); 423 + return fttmr010_common_init(np, false, 424 + fttmr010_timer_shutdown, 425 + fttmr010_timer_interrupt); 441 426 } 442 427 443 428 TIMER_OF_DECLARE(fttmr010, "faraday,fttmr010", fttmr010_timer_init); ··· 456 419 TIMER_OF_DECLARE(moxart, "moxa,moxart-timer", fttmr010_timer_init); 457 420 TIMER_OF_DECLARE(ast2400, "aspeed,ast2400-timer", aspeed_timer_init); 458 421 TIMER_OF_DECLARE(ast2500, "aspeed,ast2500-timer", aspeed_timer_init); 422 + TIMER_OF_DECLARE(ast2600, "aspeed,ast2600-timer", ast2600_timer_init);
+2 -8
drivers/clocksource/timer-imx-gpt.c
··· 67 67 struct clk *clk_ipg; 68 68 const struct imx_gpt_data *gpt; 69 69 struct clock_event_device ced; 70 - struct irqaction act; 71 70 }; 72 71 73 72 struct imx_gpt_data { ··· 272 273 static int __init mxc_clockevent_init(struct imx_timer *imxtm) 273 274 { 274 275 struct clock_event_device *ced = &imxtm->ced; 275 - struct irqaction *act = &imxtm->act; 276 276 277 277 ced->name = "mxc_timer1"; 278 278 ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ; ··· 285 287 clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per), 286 288 0xff, 0xfffffffe); 287 289 288 - act->name = "i.MX Timer Tick"; 289 - act->flags = IRQF_TIMER | IRQF_IRQPOLL; 290 - act->handler = mxc_timer_interrupt; 291 - act->dev_id = ced; 292 - 293 - return setup_irq(imxtm->irq, act); 290 + return request_irq(imxtm->irq, mxc_timer_interrupt, 291 + IRQF_TIMER | IRQF_IRQPOLL, "i.MX Timer Tick", ced); 294 292 } 295 293 296 294 static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
-2
drivers/clocksource/timer-imx-sysctr.c
··· 4 4 5 5 #include <linux/interrupt.h> 6 6 #include <linux/clockchips.h> 7 - #include <linux/of_address.h> 8 - #include <linux/of_irq.h> 9 7 10 8 #include "timer-of.h" 11 9
-2
drivers/clocksource/timer-imx-tpm.c
··· 8 8 #include <linux/clocksource.h> 9 9 #include <linux/delay.h> 10 10 #include <linux/interrupt.h> 11 - #include <linux/of_address.h> 12 - #include <linux/of_irq.h> 13 11 #include <linux/sched_clock.h> 14 12 15 13 #include "timer-of.h"
+3 -8
drivers/clocksource/timer-integrator-ap.c
··· 123 123 .rating = 300, 124 124 }; 125 125 126 - static struct irqaction integrator_timer_irq = { 127 - .name = "timer", 128 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 129 - .handler = integrator_timer_interrupt, 130 - .dev_id = &integrator_clockevent, 131 - }; 132 - 133 126 static int integrator_clockevent_init(unsigned long inrate, 134 127 void __iomem *base, int irq) 135 128 { ··· 142 149 timer_reload = rate / HZ; 143 150 writel(ctrl, clkevt_base + TIMER_CTRL); 144 151 145 - ret = setup_irq(irq, &integrator_timer_irq); 152 + ret = request_irq(irq, integrator_timer_interrupt, 153 + IRQF_TIMER | IRQF_IRQPOLL, "timer", 154 + &integrator_clockevent); 146 155 if (ret) 147 156 return ret; 148 157
+3 -8
drivers/clocksource/timer-meson6.c
··· 150 150 return IRQ_HANDLED; 151 151 } 152 152 153 - static struct irqaction meson6_timer_irq = { 154 - .name = "meson6_timer", 155 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 156 - .handler = meson6_timer_interrupt, 157 - .dev_id = &meson6_clockevent, 158 - }; 159 - 160 153 static int __init meson6_timer_init(struct device_node *node) 161 154 { 162 155 u32 val; ··· 187 194 /* Stop the timer A */ 188 195 meson6_clkevt_time_stop(); 189 196 190 - ret = setup_irq(irq, &meson6_timer_irq); 197 + ret = request_irq(irq, meson6_timer_interrupt, 198 + IRQF_TIMER | IRQF_IRQPOLL, "meson6_timer", 199 + &meson6_clockevent); 191 200 if (ret) { 192 201 pr_warn("failed to setup irq %d\n", irq); 193 202 return ret;
+1
drivers/clocksource/timer-microchip-pit64b.c
··· 264 264 265 265 if (!best_diff) { 266 266 timer->mode |= MCHP_PIT64B_MR_SGCLK; 267 + clk_set_rate(timer->gclk, gclk_round); 267 268 goto done; 268 269 } 269 270
+2 -7
drivers/clocksource/timer-orion.c
··· 114 114 return IRQ_HANDLED; 115 115 } 116 116 117 - static struct irqaction orion_clkevt_irq = { 118 - .name = "orion_event", 119 - .flags = IRQF_TIMER, 120 - .handler = orion_clkevt_irq_handler, 121 - }; 122 - 123 117 static int __init orion_timer_init(struct device_node *np) 124 118 { 125 119 unsigned long rate; ··· 166 172 sched_clock_register(orion_read_sched_clock, 32, rate); 167 173 168 174 /* setup timer1 as clockevent timer */ 169 - ret = setup_irq(irq, &orion_clkevt_irq); 175 + ret = request_irq(irq, orion_clkevt_irq_handler, IRQF_TIMER, 176 + "orion_event", NULL); 170 177 if (ret) { 171 178 pr_err("%pOFn: unable to setup irq\n", np); 172 179 return ret;
+11 -4
drivers/clocksource/timer-owl.c
··· 135 135 } 136 136 137 137 clk = of_clk_get(node, 0); 138 - if (IS_ERR(clk)) 139 - return PTR_ERR(clk); 138 + if (IS_ERR(clk)) { 139 + ret = PTR_ERR(clk); 140 + pr_err("Failed to get clock for clocksource (%d)\n", ret); 141 + return ret; 142 + } 140 143 141 144 rate = clk_get_rate(clk); 142 145 ··· 147 144 owl_timer_set_enabled(owl_clksrc_base, true); 148 145 149 146 sched_clock_register(owl_timer_sched_read, 32, rate); 150 - clocksource_mmio_init(owl_clksrc_base + OWL_Tx_VAL, node->name, 151 - rate, 200, 32, clocksource_mmio_readl_up); 147 + ret = clocksource_mmio_init(owl_clksrc_base + OWL_Tx_VAL, node->name, 148 + rate, 200, 32, clocksource_mmio_readl_up); 149 + if (ret) { 150 + pr_err("Failed to register clocksource (%d)\n", ret); 151 + return ret; 152 + } 152 153 153 154 owl_timer_reset(owl_clkevt_base); 154 155
+4 -10
drivers/clocksource/timer-prima2.c
··· 165 165 .resume = sirfsoc_clocksource_resume, 166 166 }; 167 167 168 - static struct irqaction sirfsoc_timer_irq = { 169 - .name = "sirfsoc_timer0", 170 - .flags = IRQF_TIMER, 171 - .irq = 0, 172 - .handler = sirfsoc_timer_interrupt, 173 - .dev_id = &sirfsoc_clockevent, 174 - }; 175 - 176 168 /* Overwrite weak default sched_clock with more precise one */ 177 169 static u64 notrace sirfsoc_read_sched_clock(void) 178 170 { ··· 182 190 static int __init sirfsoc_prima2_timer_init(struct device_node *np) 183 191 { 184 192 unsigned long rate; 193 + unsigned int irq; 185 194 struct clk *clk; 186 195 int ret; 187 196 ··· 211 218 return -ENXIO; 212 219 } 213 220 214 - sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0); 221 + irq = irq_of_parse_and_map(np, 0); 215 222 216 223 writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1, 217 224 sirfsoc_timer_base + SIRFSOC_TIMER_DIV); ··· 227 234 228 235 sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ); 229 236 230 - ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); 237 + ret = request_irq(irq, sirfsoc_timer_interrupt, IRQF_TIMER, 238 + "sirfsoc_timer0", &sirfsoc_clockevent); 231 239 if (ret) { 232 240 pr_err("Failed to setup irq\n"); 233 241 return ret;
+2 -8
drivers/clocksource/timer-pxa.c
··· 143 143 .resume = pxa_timer_resume, 144 144 }; 145 145 146 - static struct irqaction pxa_ost0_irq = { 147 - .name = "ost0", 148 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 149 - .handler = pxa_ost0_interrupt, 150 - .dev_id = &ckevt_pxa_osmr0, 151 - }; 152 - 153 146 static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate) 154 147 { 155 148 int ret; ··· 154 161 155 162 ckevt_pxa_osmr0.cpumask = cpumask_of(0); 156 163 157 - ret = setup_irq(irq, &pxa_ost0_irq); 164 + ret = request_irq(irq, pxa_ost0_interrupt, IRQF_TIMER | IRQF_IRQPOLL, 165 + "ost0", &ckevt_pxa_osmr0); 158 166 if (ret) { 159 167 pr_err("Failed to setup irq\n"); 160 168 return ret;
+3 -8
drivers/clocksource/timer-sp804.c
··· 168 168 .rating = 300, 169 169 }; 170 170 171 - static struct irqaction sp804_timer_irq = { 172 - .name = "timer", 173 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 174 - .handler = sp804_timer_interrupt, 175 - .dev_id = &sp804_clockevent, 176 - }; 177 - 178 171 int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name) 179 172 { 180 173 struct clock_event_device *evt = &sp804_clockevent; ··· 193 200 194 201 writel(0, base + TIMER_CTRL); 195 202 196 - setup_irq(irq, &sp804_timer_irq); 203 + if (request_irq(irq, sp804_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, 204 + "timer", &sp804_clockevent)) 205 + pr_err("%s: request_irq() failed\n", "timer"); 197 206 clockevents_config_and_register(evt, rate, 0xf, 0xffffffff); 198 207 199 208 return 0;
+110 -107
drivers/clocksource/timer-ti-dm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 1 2 /* 2 3 * linux/arch/arm/plat-omap/dmtimer.c 3 4 * ··· 16 15 * 17 16 * Copyright (C) 2009 Texas Instruments 18 17 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> 19 - * 20 - * This program is free software; you can redistribute it and/or modify it 21 - * under the terms of the GNU General Public License as published by the 22 - * Free Software Foundation; either version 2 of the License, or (at your 23 - * option) any later version. 24 - * 25 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 26 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 27 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 28 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 29 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 30 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 - * 34 - * You should have received a copy of the GNU General Public License along 35 - * with this program; if not, write to the Free Software Foundation, Inc., 36 - * 675 Mass Ave, Cambridge, MA 02139, USA. 
37 18 */ 38 19 39 20 #include <linux/clk.h> 40 21 #include <linux/clk-provider.h> 22 + #include <linux/cpu_pm.h> 41 23 #include <linux/module.h> 42 24 #include <linux/io.h> 43 25 #include <linux/device.h> ··· 93 109 timer->context.tclr); 94 110 } 95 111 112 + static void omap_timer_save_context(struct omap_dm_timer *timer) 113 + { 114 + timer->context.tclr = 115 + omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); 116 + timer->context.twer = 117 + omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG); 118 + timer->context.tldr = 119 + omap_dm_timer_read_reg(timer, OMAP_TIMER_LOAD_REG); 120 + timer->context.tmar = 121 + omap_dm_timer_read_reg(timer, OMAP_TIMER_MATCH_REG); 122 + timer->context.tier = readl_relaxed(timer->irq_ena); 123 + timer->context.tsicr = 124 + omap_dm_timer_read_reg(timer, OMAP_TIMER_IF_CTRL_REG); 125 + } 126 + 127 + static int omap_timer_context_notifier(struct notifier_block *nb, 128 + unsigned long cmd, void *v) 129 + { 130 + struct omap_dm_timer *timer; 131 + 132 + timer = container_of(nb, struct omap_dm_timer, nb); 133 + 134 + switch (cmd) { 135 + case CPU_CLUSTER_PM_ENTER: 136 + if ((timer->capability & OMAP_TIMER_ALWON) || 137 + !atomic_read(&timer->enabled)) 138 + break; 139 + omap_timer_save_context(timer); 140 + break; 141 + case CPU_CLUSTER_PM_ENTER_FAILED: 142 + case CPU_CLUSTER_PM_EXIT: 143 + if ((timer->capability & OMAP_TIMER_ALWON) || 144 + !atomic_read(&timer->enabled)) 145 + break; 146 + omap_timer_restore_context(timer); 147 + break; 148 + } 149 + 150 + return NOTIFY_OK; 151 + } 152 + 96 153 static int omap_dm_timer_reset(struct omap_dm_timer *timer) 97 154 { 98 155 u32 l, timeout = 100000; ··· 161 136 timer->posted = 0; 162 137 163 138 return 0; 164 - } 165 - 166 - static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer) 167 - { 168 - int ret; 169 - struct clk *parent; 170 - 171 - /* 172 - * FIXME: OMAP1 devices do not use the clock framework for dmtimers so 173 - * do not call clk_get() for these devices. 
174 - */ 175 - if (!timer->fclk) 176 - return -ENODEV; 177 - 178 - parent = clk_get(&timer->pdev->dev, NULL); 179 - if (IS_ERR(parent)) 180 - return -ENODEV; 181 - 182 - /* Bail out if both clocks point to fck */ 183 - if (clk_is_match(parent, timer->fclk)) 184 - return 0; 185 - 186 - ret = clk_set_parent(timer->fclk, parent); 187 - if (ret < 0) 188 - pr_err("%s: failed to set parent\n", __func__); 189 - 190 - clk_put(parent); 191 - 192 - return ret; 193 139 } 194 140 195 141 static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source) ··· 221 225 222 226 static void omap_dm_timer_enable(struct omap_dm_timer *timer) 223 227 { 224 - int c; 225 - 226 228 pm_runtime_get_sync(&timer->pdev->dev); 227 - 228 - if (!(timer->capability & OMAP_TIMER_ALWON)) { 229 - if (timer->get_context_loss_count) { 230 - c = timer->get_context_loss_count(&timer->pdev->dev); 231 - if (c != timer->ctx_loss_count) { 232 - omap_timer_restore_context(timer); 233 - timer->ctx_loss_count = c; 234 - } 235 - } else { 236 - omap_timer_restore_context(timer); 237 - } 238 - } 239 229 } 240 230 241 231 static void omap_dm_timer_disable(struct omap_dm_timer *timer) ··· 258 276 __omap_dm_timer_enable_posted(timer); 259 277 omap_dm_timer_disable(timer); 260 278 261 - rc = omap_dm_timer_of_set_source(timer); 262 - if (rc == -ENODEV) 263 - return omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ); 279 + rc = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ); 264 280 265 281 return rc; 266 282 } ··· 488 508 489 509 int omap_dm_timer_trigger(struct omap_dm_timer *timer) 490 510 { 491 - if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) { 511 + if (unlikely(!timer || !atomic_read(&timer->enabled))) { 492 512 pr_err("%s: timer not available or enabled.\n", __func__); 493 513 return -EINVAL; 494 514 } ··· 512 532 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); 513 533 } 514 534 515 - /* Save the context */ 516 - timer->context.tclr = l; 517 535 return 0; 518 536 } 
519 537 ··· 527 549 528 550 __omap_dm_timer_stop(timer, timer->posted, rate); 529 551 530 - /* 531 - * Since the register values are computed and written within 532 - * __omap_dm_timer_stop, we need to use read to retrieve the 533 - * context. 534 - */ 535 - timer->context.tclr = 536 - omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); 537 552 omap_dm_timer_disable(timer); 538 553 return 0; 539 554 } 540 555 541 - static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, 556 + static int omap_dm_timer_set_load(struct omap_dm_timer *timer, 542 557 unsigned int load) 543 558 { 544 - u32 l; 545 - 546 559 if (unlikely(!timer)) 547 560 return -EINVAL; 548 561 549 562 omap_dm_timer_enable(timer); 550 - l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); 551 - if (autoreload) 552 - l |= OMAP_TIMER_CTRL_AR; 553 - else 554 - l &= ~OMAP_TIMER_CTRL_AR; 555 - omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); 556 563 omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load); 557 564 558 - omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0); 559 - /* Save the context */ 560 - timer->context.tclr = l; 561 - timer->context.tldr = load; 562 565 omap_dm_timer_disable(timer); 563 566 return 0; 564 567 } ··· 561 602 omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match); 562 603 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); 563 604 564 - /* Save the context */ 565 - timer->context.tclr = l; 566 - timer->context.tmar = match; 567 605 omap_dm_timer_disable(timer); 568 606 return 0; 569 607 } 570 608 571 609 static int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on, 572 - int toggle, int trigger) 610 + int toggle, int trigger, int autoreload) 573 611 { 574 612 u32 l; 575 613 ··· 576 620 omap_dm_timer_enable(timer); 577 621 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); 578 622 l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM | 579 - OMAP_TIMER_CTRL_PT | (0x03 << 10)); 623 + OMAP_TIMER_CTRL_PT | (0x03 << 10) 
| OMAP_TIMER_CTRL_AR); 580 624 if (def_on) 581 625 l |= OMAP_TIMER_CTRL_SCPWM; 582 626 if (toggle) 583 627 l |= OMAP_TIMER_CTRL_PT; 584 628 l |= trigger << 10; 629 + if (autoreload) 630 + l |= OMAP_TIMER_CTRL_AR; 585 631 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); 586 632 587 - /* Save the context */ 588 - timer->context.tclr = l; 589 633 omap_dm_timer_disable(timer); 590 634 return 0; 635 + } 636 + 637 + static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *timer) 638 + { 639 + u32 l; 640 + 641 + if (unlikely(!timer)) 642 + return -EINVAL; 643 + 644 + omap_dm_timer_enable(timer); 645 + l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); 646 + omap_dm_timer_disable(timer); 647 + 648 + return l; 591 649 } 592 650 593 651 static int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, ··· 621 651 } 622 652 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); 623 653 624 - /* Save the context */ 625 - timer->context.tclr = l; 626 654 omap_dm_timer_disable(timer); 627 655 return 0; 628 656 } ··· 634 666 omap_dm_timer_enable(timer); 635 667 __omap_dm_timer_int_enable(timer, value); 636 668 637 - /* Save the context */ 638 - timer->context.tier = value; 639 - timer->context.twer = value; 640 669 omap_dm_timer_disable(timer); 641 670 return 0; 642 671 } ··· 661 696 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask; 662 697 omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, l); 663 698 664 - /* Save the context */ 665 - timer->context.tier &= ~mask; 666 - timer->context.twer &= ~mask; 667 699 omap_dm_timer_disable(timer); 668 700 return 0; 669 701 } ··· 669 707 { 670 708 unsigned int l; 671 709 672 - if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) { 710 + if (unlikely(!timer || !atomic_read(&timer->enabled))) { 673 711 pr_err("%s: timer not available or enabled.\n", __func__); 674 712 return 0; 675 713 } ··· 681 719 682 720 static int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int 
value) 683 721 { 684 - if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) 722 + if (unlikely(!timer || !atomic_read(&timer->enabled))) 685 723 return -EINVAL; 686 724 687 725 __omap_dm_timer_write_status(timer, value); ··· 691 729 692 730 static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer) 693 731 { 694 - if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) { 732 + if (unlikely(!timer || !atomic_read(&timer->enabled))) { 695 733 pr_err("%s: timer not iavailable or enabled.\n", __func__); 696 734 return 0; 697 735 } ··· 701 739 702 740 static int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value) 703 741 { 704 - if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) { 742 + if (unlikely(!timer || !atomic_read(&timer->enabled))) { 705 743 pr_err("%s: timer not available or enabled.\n", __func__); 706 744 return -EINVAL; 707 745 } ··· 728 766 } 729 767 return 0; 730 768 } 769 + 770 + static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev) 771 + { 772 + struct omap_dm_timer *timer = dev_get_drvdata(dev); 773 + 774 + atomic_set(&timer->enabled, 0); 775 + 776 + if (timer->capability & OMAP_TIMER_ALWON || !timer->func_base) 777 + return 0; 778 + 779 + omap_timer_save_context(timer); 780 + 781 + return 0; 782 + } 783 + 784 + static int __maybe_unused omap_dm_timer_runtime_resume(struct device *dev) 785 + { 786 + struct omap_dm_timer *timer = dev_get_drvdata(dev); 787 + 788 + if (!(timer->capability & OMAP_TIMER_ALWON) && timer->func_base) 789 + omap_timer_restore_context(timer); 790 + 791 + atomic_set(&timer->enabled, 1); 792 + 793 + return 0; 794 + } 795 + 796 + static const struct dev_pm_ops omap_dm_timer_pm_ops = { 797 + SET_RUNTIME_PM_OPS(omap_dm_timer_runtime_suspend, 798 + omap_dm_timer_runtime_resume, NULL) 799 + }; 731 800 732 801 static const struct of_device_id omap_timer_match[]; 733 802 ··· 801 808 if (IS_ERR(timer->io_base)) 802 809 return 
PTR_ERR(timer->io_base); 803 810 811 + platform_set_drvdata(pdev, timer); 812 + 804 813 if (dev->of_node) { 805 814 if (of_find_property(dev->of_node, "ti,timer-alwon", NULL)) 806 815 timer->capability |= OMAP_TIMER_ALWON; ··· 816 821 timer->id = pdev->id; 817 822 timer->capability = pdata->timer_capability; 818 823 timer->reserved = omap_dm_timer_reserved_systimer(timer->id); 819 - timer->get_context_loss_count = pdata->get_context_loss_count; 824 + } 825 + 826 + if (!(timer->capability & OMAP_TIMER_ALWON)) { 827 + timer->nb.notifier_call = omap_timer_context_notifier; 828 + cpu_pm_register_notifier(&timer->nb); 820 829 } 821 830 822 831 if (pdata) ··· 874 875 list_for_each_entry(timer, &omap_timer_list, node) 875 876 if (!strcmp(dev_name(&timer->pdev->dev), 876 877 dev_name(&pdev->dev))) { 878 + if (!(timer->capability & OMAP_TIMER_ALWON)) 879 + cpu_pm_unregister_notifier(&timer->nb); 877 880 list_del(&timer->node); 878 881 ret = 0; 879 882 break; ··· 904 903 .set_load = omap_dm_timer_set_load, 905 904 .set_match = omap_dm_timer_set_match, 906 905 .set_pwm = omap_dm_timer_set_pwm, 906 + .get_pwm_status = omap_dm_timer_get_pwm_status, 907 907 .set_prescaler = omap_dm_timer_set_prescaler, 908 908 .read_counter = omap_dm_timer_read_counter, 909 909 .write_counter = omap_dm_timer_write_counter, ··· 955 953 .driver = { 956 954 .name = "omap_timer", 957 955 .of_match_table = of_match_ptr(omap_timer_match), 956 + .pm = &omap_dm_timer_pm_ops, 958 957 }, 959 958 }; 960 959
+2 -7
drivers/clocksource/timer-u300.c
··· 330 330 return IRQ_HANDLED; 331 331 } 332 332 333 - static struct irqaction u300_timer_irq = { 334 - .name = "U300 Timer Tick", 335 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 336 - .handler = u300_timer_interrupt, 337 - }; 338 - 339 333 /* 340 334 * Override the global weak sched_clock symbol with this 341 335 * local implementation which uses the clocksource to get some ··· 414 420 u300_timer_base + U300_TIMER_APP_RGPT1); 415 421 416 422 /* Set up the IRQ handler */ 417 - ret = setup_irq(irq, &u300_timer_irq); 423 + ret = request_irq(irq, u300_timer_interrupt, 424 + IRQF_TIMER | IRQF_IRQPOLL, "U300 Timer Tick", NULL); 418 425 if (ret) 419 426 return ret; 420 427
+2 -8
drivers/clocksource/timer-vf-pit.c
··· 123 123 .rating = 300, 124 124 }; 125 125 126 - static struct irqaction pit_timer_irq = { 127 - .name = "VF pit timer", 128 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 129 - .handler = pit_timer_interrupt, 130 - .dev_id = &clockevent_pit, 131 - }; 132 - 133 126 static int __init pit_clockevent_init(unsigned long rate, int irq) 134 127 { 135 128 __raw_writel(0, clkevt_base + PITTCTRL); 136 129 __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); 137 130 138 - BUG_ON(setup_irq(irq, &pit_timer_irq)); 131 + BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, 132 + "VF pit timer", &clockevent_pit); 139 133 140 134 clockevent_pit.cpumask = cpumask_of(0); 141 135 clockevent_pit.irq = irq;
+3 -8
drivers/clocksource/timer-vt8500.c
··· 101 101 return IRQ_HANDLED; 102 102 } 103 103 104 - static struct irqaction irq = { 105 - .name = "vt8500_timer", 106 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 107 - .handler = vt8500_timer_interrupt, 108 - .dev_id = &clockevent, 109 - }; 110 - 111 104 static int __init vt8500_timer_init(struct device_node *np) 112 105 { 113 106 int timer_irq, ret; ··· 132 139 133 140 clockevent.cpumask = cpumask_of(0); 134 141 135 - ret = setup_irq(timer_irq, &irq); 142 + ret = request_irq(timer_irq, vt8500_timer_interrupt, 143 + IRQF_TIMER | IRQF_IRQPOLL, "vt8500_timer", 144 + &clockevent); 136 145 if (ret) { 137 146 pr_err("%s: setup_irq failed for %s\n", __func__, 138 147 clockevent.name);
+6 -7
drivers/clocksource/timer-zevio.c
··· 53 53 54 54 struct clk *clk; 55 55 struct clock_event_device clkevt; 56 - struct irqaction clkevt_irq; 57 56 58 57 char clocksource_name[64]; 59 58 char clockevent_name[64]; ··· 171 172 /* Interrupt to occur when timer value matches 0 */ 172 173 writel(0, timer->base + IO_MATCH(TIMER_MATCH)); 173 174 174 - timer->clkevt_irq.name = timer->clockevent_name; 175 - timer->clkevt_irq.handler = zevio_timer_interrupt; 176 - timer->clkevt_irq.dev_id = timer; 177 - timer->clkevt_irq.flags = IRQF_TIMER | IRQF_IRQPOLL; 178 - 179 - setup_irq(irqnr, &timer->clkevt_irq); 175 + if (request_irq(irqnr, zevio_timer_interrupt, 176 + IRQF_TIMER | IRQF_IRQPOLL, 177 + timer->clockevent_name, timer)) { 178 + pr_err("%s: request_irq() failed\n", 179 + timer->clockevent_name); 180 + } 180 181 181 182 clockevents_config_and_register(&timer->clkevt, 182 183 clk_get_rate(timer->clk), 0x0001, 0xffff);
+5 -3
drivers/pwm/pwm-omap-dmtimer.c
··· 183 183 if (timer_active) 184 184 omap->pdata->stop(omap->dm_timer); 185 185 186 - omap->pdata->set_load(omap->dm_timer, true, load_value); 186 + omap->pdata->set_load(omap->dm_timer, load_value); 187 187 omap->pdata->set_match(omap->dm_timer, true, match_value); 188 188 189 189 dev_dbg(chip->dev, "load value: %#08x (%d), match value: %#08x (%d)\n", ··· 192 192 omap->pdata->set_pwm(omap->dm_timer, 193 193 pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED, 194 194 true, 195 - PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE); 195 + PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE, 196 + true); 196 197 197 198 /* If config was called while timer was running it must be reenabled. */ 198 199 if (timer_active) ··· 223 222 omap->pdata->set_pwm(omap->dm_timer, 224 223 polarity == PWM_POLARITY_INVERSED, 225 224 true, 226 - PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE); 225 + PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE, 226 + true); 227 227 mutex_unlock(&omap->mutex); 228 228 229 229 return 0;
-14
include/asm-generic/vdso/vsyscall.h
··· 11 11 } 12 12 #endif /* __arch_get_k_vdso_data */ 13 13 14 - #ifndef __arch_update_vdso_data 15 - static __always_inline bool __arch_update_vdso_data(void) 16 - { 17 - return true; 18 - } 19 - #endif /* __arch_update_vdso_data */ 20 - 21 - #ifndef __arch_get_clock_mode 22 - static __always_inline int __arch_get_clock_mode(struct timekeeper *tk) 23 - { 24 - return 0; 25 - } 26 - #endif /* __arch_get_clock_mode */ 27 - 28 14 #ifndef __arch_update_vsyscall 29 15 static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata, 30 16 struct timekeeper *tk)
+2 -2
include/clocksource/timer-ti-dm.h
··· 105 105 void __iomem *pend; /* write pending */ 106 106 void __iomem *func_base; /* function register base */ 107 107 108 + atomic_t enabled; 108 109 unsigned long rate; 109 110 unsigned reserved:1; 110 111 unsigned posted:1; 111 112 struct timer_regs context; 112 - int (*get_context_loss_count)(struct device *); 113 - int ctx_loss_count; 114 113 int revision; 115 114 u32 capability; 116 115 u32 errata; 117 116 struct platform_device *pdev; 118 117 struct list_head node; 118 + struct notifier_block nb; 119 119 }; 120 120 121 121 int omap_dm_timer_reserve_systimer(int id);
+1 -1
include/linux/bits.h
··· 3 3 #define __LINUX_BITS_H 4 4 5 5 #include <linux/const.h> 6 + #include <vdso/bits.h> 6 7 #include <asm/bitsperlong.h> 7 8 8 - #define BIT(nr) (UL(1) << (nr)) 9 9 #define BIT_ULL(nr) (ULL(1) << (nr)) 10 10 #define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG)) 11 11 #define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+52 -41
include/linux/clocksource.h
··· 23 23 struct clocksource; 24 24 struct module; 25 25 26 - #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA 26 + #if defined(CONFIG_ARCH_CLOCKSOURCE_DATA) || \ 27 + defined(CONFIG_GENERIC_GETTIMEOFDAY) 27 28 #include <asm/clocksource.h> 28 29 #endif 30 + 31 + #include <vdso/clocksource.h> 29 32 30 33 /** 31 34 * struct clocksource - hardware abstraction for a free running counter 32 35 * Provides mostly state-free accessors to the underlying hardware. 33 36 * This is the structure used for system time. 34 37 * 35 - * @name: ptr to clocksource name 36 - * @list: list head for registration 37 - * @rating: rating value for selection (higher is better) 38 + * @read: Returns a cycle value, passes clocksource as argument 39 + * @mask: Bitmask for two's complement 40 + * subtraction of non 64 bit counters 41 + * @mult: Cycle to nanosecond multiplier 42 + * @shift: Cycle to nanosecond divisor (power of two) 43 + * @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs) 44 + * @maxadj: Maximum adjustment value to mult (~11%) 45 + * @archdata: Optional arch-specific data 46 + * @max_cycles: Maximum safe cycle value which won't overflow on 47 + * multiplication 48 + * @name: Pointer to clocksource name 49 + * @list: List head for registration (internal) 50 + * @rating: Rating value for selection (higher is better) 38 51 * To avoid rating inflation the following 39 52 * list should give you a guide as to how 40 53 * to assign your clocksource a rating ··· 62 49 * 400-499: Perfect 63 50 * The ideal clocksource. A must-use where 64 51 * available. 
65 - * @read: returns a cycle value, passes clocksource as argument 66 - * @enable: optional function to enable the clocksource 67 - * @disable: optional function to disable the clocksource 68 - * @mask: bitmask for two's complement 69 - * subtraction of non 64 bit counters 70 - * @mult: cycle to nanosecond multiplier 71 - * @shift: cycle to nanosecond divisor (power of two) 72 - * @max_idle_ns: max idle time permitted by the clocksource (nsecs) 73 - * @maxadj: maximum adjustment value to mult (~11%) 74 - * @max_cycles: maximum safe cycle value which won't overflow on multiplication 75 - * @flags: flags describing special properties 76 - * @archdata: arch-specific data 77 - * @suspend: suspend function for the clocksource, if necessary 78 - * @resume: resume function for the clocksource, if necessary 52 + * @flags: Flags describing special properties 53 + * @enable: Optional function to enable the clocksource 54 + * @disable: Optional function to disable the clocksource 55 + * @suspend: Optional suspend function for the clocksource 56 + * @resume: Optional resume function for the clocksource 79 57 * @mark_unstable: Optional function to inform the clocksource driver that 80 58 * the watchdog marked the clocksource unstable 81 - * @owner: module reference, must be set by clocksource in modules 59 + * @tick_stable: Optional function called periodically from the watchdog 60 + * code to provide stable syncrhonization points 61 + * @wd_list: List head to enqueue into the watchdog list (internal) 62 + * @cs_last: Last clocksource value for clocksource watchdog 63 + * @wd_last: Last watchdog value corresponding to @cs_last 64 + * @owner: Module reference, must be set by clocksource in modules 82 65 * 83 66 * Note: This struct is not used in hotpathes of the timekeeping code 84 67 * because the timekeeper caches the hot path fields in its own data 85 - * structure, so no line cache alignment is required, 68 + * structure, so no cache line alignment is required, 86 69 * 87 
70 * The pointer to the clocksource itself is handed to the read 88 71 * callback. If you need extra information there you can wrap struct ··· 87 78 * structure. 88 79 */ 89 80 struct clocksource { 90 - u64 (*read)(struct clocksource *cs); 91 - u64 mask; 92 - u32 mult; 93 - u32 shift; 94 - u64 max_idle_ns; 95 - u32 maxadj; 81 + u64 (*read)(struct clocksource *cs); 82 + u64 mask; 83 + u32 mult; 84 + u32 shift; 85 + u64 max_idle_ns; 86 + u32 maxadj; 96 87 #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA 97 88 struct arch_clocksource_data archdata; 98 89 #endif 99 - u64 max_cycles; 100 - const char *name; 101 - struct list_head list; 102 - int rating; 103 - int (*enable)(struct clocksource *cs); 104 - void (*disable)(struct clocksource *cs); 105 - unsigned long flags; 106 - void (*suspend)(struct clocksource *cs); 107 - void (*resume)(struct clocksource *cs); 108 - void (*mark_unstable)(struct clocksource *cs); 109 - void (*tick_stable)(struct clocksource *cs); 90 + u64 max_cycles; 91 + const char *name; 92 + struct list_head list; 93 + int rating; 94 + enum vdso_clock_mode vdso_clock_mode; 95 + unsigned long flags; 96 + 97 + int (*enable)(struct clocksource *cs); 98 + void (*disable)(struct clocksource *cs); 99 + void (*suspend)(struct clocksource *cs); 100 + void (*resume)(struct clocksource *cs); 101 + void (*mark_unstable)(struct clocksource *cs); 102 + void (*tick_stable)(struct clocksource *cs); 110 103 111 104 /* private: */ 112 105 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG 113 106 /* Watchdog related data, used by the framework */ 114 - struct list_head wd_list; 115 - u64 cs_last; 116 - u64 wd_last; 107 + struct list_head wd_list; 108 + u64 cs_last; 109 + u64 wd_last; 117 110 #endif 118 - struct module *owner; 111 + struct module *owner; 119 112 }; 120 113 121 114 /*
+1 -4
include/linux/const.h
··· 1 1 #ifndef _LINUX_CONST_H 2 2 #define _LINUX_CONST_H 3 3 4 - #include <uapi/linux/const.h> 5 - 6 - #define UL(x) (_UL(x)) 7 - #define ULL(x) (_ULL(x)) 4 + #include <vdso/const.h> 8 5 9 6 #endif /* _LINUX_CONST_H */
-1
include/linux/dw_apb_timer.h
··· 25 25 struct dw_apb_clock_event_device { 26 26 struct clock_event_device ced; 27 27 struct dw_apb_timer timer; 28 - struct irqaction irqaction; 29 28 void (*eoi)(struct dw_apb_timer *); 30 29 }; 31 30
+1 -1
include/linux/elfnote.h
··· 59 59 ELFNOTE_END 60 60 61 61 #else /* !__ASSEMBLER__ */ 62 - #include <linux/elf.h> 62 + #include <uapi/linux/elf.h> 63 63 /* 64 64 * Use an anonymous structure which matches the shape of 65 65 * Elf{32,64}_Nhdr, but includes the name and desc data. The size and
+1 -3
include/linux/jiffies.h
··· 8 8 #include <linux/types.h> 9 9 #include <linux/time.h> 10 10 #include <linux/timex.h> 11 + #include <vdso/jiffies.h> 11 12 #include <asm/param.h> /* for HZ */ 12 13 #include <generated/timeconst.h> 13 14 ··· 59 58 #define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */ 60 59 61 60 extern int register_refined_jiffies(long clock_tick_rate); 62 - 63 - /* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ 64 - #define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) 65 61 66 62 /* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */ 67 63 #define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
+1 -8
include/linux/ktime.h
··· 216 216 } 217 217 } 218 218 219 - /* 220 - * The resolution of the clocks. The resolution value is returned in 221 - * the clock_getres() system call to give application programmers an 222 - * idea of the (in)accuracy of timers. Timer values are rounded up to 223 - * this resolution values. 224 - */ 225 - #define LOW_RES_NSEC TICK_NSEC 226 - #define KTIME_LOW_RES (LOW_RES_NSEC) 219 + #include <vdso/ktime.h> 227 220 228 221 static inline ktime_t ns_to_ktime(u64 ns) 229 222 {
+1 -12
include/linux/limits.h
··· 4 4 5 5 #include <uapi/linux/limits.h> 6 6 #include <linux/types.h> 7 + #include <vdso/limits.h> 7 8 8 - #define USHRT_MAX ((unsigned short)~0U) 9 - #define SHRT_MAX ((short)(USHRT_MAX >> 1)) 10 - #define SHRT_MIN ((short)(-SHRT_MAX - 1)) 11 - #define INT_MAX ((int)(~0U >> 1)) 12 - #define INT_MIN (-INT_MAX - 1) 13 - #define UINT_MAX (~0U) 14 - #define LONG_MAX ((long)(~0UL >> 1)) 15 - #define LONG_MIN (-LONG_MAX - 1) 16 - #define ULONG_MAX (~0UL) 17 - #define LLONG_MAX ((long long)(~0ULL >> 1)) 18 - #define LLONG_MIN (-LLONG_MAX - 1) 19 - #define ULLONG_MAX (~0ULL) 20 9 #define SIZE_MAX (~(size_t)0) 21 10 #define PHYS_ADDR_MAX (~(phys_addr_t)0) 22 11
+1 -19
include/linux/math64.h
··· 3 3 #define _LINUX_MATH64_H 4 4 5 5 #include <linux/types.h> 6 + #include <vdso/math64.h> 6 7 #include <asm/div64.h> 7 8 8 9 #if BITS_PER_LONG == 64 ··· 142 141 #endif 143 142 144 143 u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); 145 - 146 - static __always_inline u32 147 - __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) 148 - { 149 - u32 ret = 0; 150 - 151 - while (dividend >= divisor) { 152 - /* The following asm() prevents the compiler from 153 - optimising this loop into a modulo operation. */ 154 - asm("" : "+rm"(dividend)); 155 - 156 - dividend -= divisor; 157 - ret++; 158 - } 159 - 160 - *remainder = dividend; 161 - 162 - return ret; 163 - } 164 144 165 145 #ifndef mul_u32_u32 166 146 /*
+3 -3
include/linux/platform_data/dmtimer-omap.h
··· 30 30 int (*stop)(struct omap_dm_timer *timer); 31 31 int (*set_source)(struct omap_dm_timer *timer, int source); 32 32 33 - int (*set_load)(struct omap_dm_timer *timer, int autoreload, 34 - unsigned int value); 33 + int (*set_load)(struct omap_dm_timer *timer, unsigned int value); 35 34 int (*set_match)(struct omap_dm_timer *timer, int enable, 36 35 unsigned int match); 37 36 int (*set_pwm)(struct omap_dm_timer *timer, int def_on, 38 - int toggle, int trigger); 37 + int toggle, int trigger, int autoreload); 38 + int (*get_pwm_status)(struct omap_dm_timer *timer); 39 39 int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler); 40 40 41 41 unsigned int (*read_counter)(struct omap_dm_timer *timer);
+1 -1
include/linux/posix-timers.h
··· 69 69 struct cpu_timer { 70 70 struct timerqueue_node node; 71 71 struct timerqueue_head *head; 72 - struct task_struct *task; 72 + struct pid *pid; 73 73 struct list_head elist; 74 74 int firing; 75 75 };
+1 -4
include/linux/time.h
··· 111 111 */ 112 112 #define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l)) 113 113 114 - struct timens_offset { 115 - s64 sec; 116 - u64 nsec; 117 - }; 114 + # include <vdso/time.h> 118 115 119 116 #endif
+1 -11
include/linux/time32.h
··· 12 12 #include <linux/time64.h> 13 13 #include <linux/timex.h> 14 14 15 - typedef s32 old_time32_t; 16 - 17 - struct old_timespec32 { 18 - old_time32_t tv_sec; 19 - s32 tv_nsec; 20 - }; 21 - 22 - struct old_timeval32 { 23 - old_time32_t tv_sec; 24 - s32 tv_usec; 25 - }; 15 + #include <vdso/time32.h> 26 16 27 17 struct old_itimerspec32 { 28 18 struct old_timespec32 it_interval;
+1 -9
include/linux/time64.h
··· 3 3 #define _LINUX_TIME64_H 4 4 5 5 #include <linux/math64.h> 6 + #include <vdso/time64.h> 6 7 7 8 typedef __s64 time64_t; 8 9 typedef __u64 timeu64_t; ··· 19 18 struct timespec64 it_interval; 20 19 struct timespec64 it_value; 21 20 }; 22 - 23 - /* Parameters used to convert the timespec values: */ 24 - #define MSEC_PER_SEC 1000L 25 - #define USEC_PER_MSEC 1000L 26 - #define NSEC_PER_USEC 1000L 27 - #define NSEC_PER_MSEC 1000000L 28 - #define USEC_PER_SEC 1000000L 29 - #define NSEC_PER_SEC 1000000000L 30 - #define FSEC_PER_SEC 1000000000000000LL 31 21 32 22 /* Located here for timespec[64]_valid_strict */ 33 23 #define TIME64_MAX ((s64)~((u64)1 << 63))
+9
include/vdso/bits.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __VDSO_BITS_H 3 + #define __VDSO_BITS_H 4 + 5 + #include <vdso/const.h> 6 + 7 + #define BIT(nr) (UL(1) << (nr)) 8 + 9 + #endif /* __VDSO_BITS_H */
+22
include/vdso/clocksource.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __VDSO_CLOCKSOURCE_H 3 + #define __VDSO_CLOCKSOURCE_H 4 + 5 + #include <vdso/limits.h> 6 + 7 + #ifdef CONFIG_GENERIC_GETTIMEOFDAY 8 + #include <asm/vdso/clocksource.h> 9 + #endif /* CONFIG_GENERIC_GETTIMEOFDAY */ 10 + 11 + enum vdso_clock_mode { 12 + VDSO_CLOCKMODE_NONE, 13 + #ifdef CONFIG_GENERIC_GETTIMEOFDAY 14 + VDSO_ARCH_CLOCKMODES, 15 + #endif 16 + VDSO_CLOCKMODE_MAX, 17 + 18 + /* Indicator for time namespace VDSO */ 19 + VDSO_CLOCKMODE_TIMENS = INT_MAX 20 + }; 21 + 22 + #endif /* __VDSO_CLOCKSOURCE_H */
+10
include/vdso/const.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __VDSO_CONST_H 3 + #define __VDSO_CONST_H 4 + 5 + #include <uapi/linux/const.h> 6 + 7 + #define UL(x) (_UL(x)) 8 + #define ULL(x) (_ULL(x)) 9 + 10 + #endif /* __VDSO_CONST_H */
+30 -5
include/vdso/datapage.h
··· 4 4 5 5 #ifndef __ASSEMBLY__ 6 6 7 - #include <linux/bits.h> 8 - #include <linux/time.h> 9 - #include <linux/types.h> 7 + #include <linux/compiler.h> 8 + #include <uapi/linux/time.h> 9 + #include <uapi/linux/types.h> 10 + #include <uapi/asm-generic/errno-base.h> 11 + 12 + #include <vdso/bits.h> 13 + #include <vdso/clocksource.h> 14 + #include <vdso/ktime.h> 15 + #include <vdso/limits.h> 16 + #include <vdso/math64.h> 17 + #include <vdso/processor.h> 18 + #include <vdso/time.h> 19 + #include <vdso/time32.h> 20 + #include <vdso/time64.h> 10 21 11 22 #define VDSO_BASES (CLOCK_TAI + 1) 12 23 #define VDSO_HRES (BIT(CLOCK_REALTIME) | \ ··· 31 20 #define CS_HRES_COARSE 0 32 21 #define CS_RAW 1 33 22 #define CS_BASES (CS_RAW + 1) 34 - 35 - #define VCLOCK_TIMENS UINT_MAX 36 23 37 24 /** 38 25 * struct vdso_timestamp - basetime per clock_id ··· 109 100 * relocation, and this is what we need. 110 101 */ 111 102 extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden"))); 103 + 104 + /* 105 + * The generic vDSO implementation requires that gettimeofday.h 106 + * provides: 107 + * - __arch_get_vdso_data(): to get the vdso datapage. 108 + * - __arch_get_hw_counter(): to get the hw counter based on the 109 + * clock_mode. 110 + * - gettimeofday_fallback(): fallback for gettimeofday. 111 + * - clock_gettime_fallback(): fallback for clock_gettime. 112 + * - clock_getres_fallback(): fallback for clock_getres. 113 + */ 114 + #ifdef ENABLE_COMPAT_VDSO 115 + #include <asm/vdso/compat_gettimeofday.h> 116 + #else 117 + #include <asm/vdso/gettimeofday.h> 118 + #endif /* ENABLE_COMPAT_VDSO */ 112 119 113 120 #endif /* !__ASSEMBLY__ */ 114 121
+11
include/vdso/jiffies.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __VDSO_JIFFIES_H 3 + #define __VDSO_JIFFIES_H 4 + 5 + #include <asm/param.h> /* for HZ */ 6 + #include <vdso/time64.h> 7 + 8 + /* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ 9 + #define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) 10 + 11 + #endif /* __VDSO_JIFFIES_H */
+16
include/vdso/ktime.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __VDSO_KTIME_H 3 + #define __VDSO_KTIME_H 4 + 5 + #include <vdso/jiffies.h> 6 + 7 + /* 8 + * The resolution of the clocks. The resolution value is returned in 9 + * the clock_getres() system call to give application programmers an 10 + * idea of the (in)accuracy of timers. Timer values are rounded up to 11 + * this resolution values. 12 + */ 13 + #define LOW_RES_NSEC TICK_NSEC 14 + #define KTIME_LOW_RES (LOW_RES_NSEC) 15 + 16 + #endif /* __VDSO_KTIME_H */
+19
include/vdso/limits.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __VDSO_LIMITS_H 3 + #define __VDSO_LIMITS_H 4 + 5 + #define USHRT_MAX ((unsigned short)~0U) 6 + #define SHRT_MAX ((short)(USHRT_MAX >> 1)) 7 + #define SHRT_MIN ((short)(-SHRT_MAX - 1)) 8 + #define INT_MAX ((int)(~0U >> 1)) 9 + #define INT_MIN (-INT_MAX - 1) 10 + #define UINT_MAX (~0U) 11 + #define LONG_MAX ((long)(~0UL >> 1)) 12 + #define LONG_MIN (-LONG_MAX - 1) 13 + #define ULONG_MAX (~0UL) 14 + #define LLONG_MAX ((long long)(~0ULL >> 1)) 15 + #define LLONG_MIN (-LLONG_MAX - 1) 16 + #define ULLONG_MAX (~0ULL) 17 + #define UINTPTR_MAX ULONG_MAX 18 + 19 + #endif /* __VDSO_LIMITS_H */
+24
include/vdso/math64.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __VDSO_MATH64_H 3 + #define __VDSO_MATH64_H 4 + 5 + static __always_inline u32 6 + __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) 7 + { 8 + u32 ret = 0; 9 + 10 + while (dividend >= divisor) { 11 + /* The following asm() prevents the compiler from 12 + optimising this loop into a modulo operation. */ 13 + asm("" : "+rm"(dividend)); 14 + 15 + dividend -= divisor; 16 + ret++; 17 + } 18 + 19 + *remainder = dividend; 20 + 21 + return ret; 22 + } 23 + 24 + #endif /* __VDSO_MATH64_H */
+14
include/vdso/processor.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2020 ARM Ltd. 4 + */ 5 + #ifndef __VDSO_PROCESSOR_H 6 + #define __VDSO_PROCESSOR_H 7 + 8 + #ifndef __ASSEMBLY__ 9 + 10 + #include <asm/vdso/processor.h> 11 + 12 + #endif /* __ASSEMBLY__ */ 13 + 14 + #endif /* __VDSO_PROCESSOR_H */
+12
include/vdso/time.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __VDSO_TIME_H 3 + #define __VDSO_TIME_H 4 + 5 + #include <uapi/linux/types.h> 6 + 7 + struct timens_offset { 8 + s64 sec; 9 + u64 nsec; 10 + }; 11 + 12 + #endif /* __VDSO_TIME_H */
+17
include/vdso/time32.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __VDSO_TIME32_H 3 + #define __VDSO_TIME32_H 4 + 5 + typedef s32 old_time32_t; 6 + 7 + struct old_timespec32 { 8 + old_time32_t tv_sec; 9 + s32 tv_nsec; 10 + }; 11 + 12 + struct old_timeval32 { 13 + old_time32_t tv_sec; 14 + s32 tv_usec; 15 + }; 16 + 17 + #endif /* __VDSO_TIME32_H */
+14
include/vdso/time64.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __VDSO_TIME64_H 3 + #define __VDSO_TIME64_H 4 + 5 + /* Parameters used to convert the timespec values: */ 6 + #define MSEC_PER_SEC 1000L 7 + #define USEC_PER_MSEC 1000L 8 + #define NSEC_PER_USEC 1000L 9 + #define NSEC_PER_MSEC 1000000L 10 + #define USEC_PER_SEC 1000000L 11 + #define NSEC_PER_SEC 1000000000L 12 + #define FSEC_PER_SEC 1000000000000000LL 13 + 14 + #endif /* __VDSO_TIME64_H */
+1 -10
kernel/exit.c
··· 103 103 104 104 #ifdef CONFIG_POSIX_TIMERS 105 105 posix_cpu_timers_exit(tsk); 106 - if (group_dead) { 106 + if (group_dead) 107 107 posix_cpu_timers_exit_group(tsk); 108 - } else { 109 - /* 110 - * This can only happen if the caller is de_thread(). 111 - * FIXME: this is the temporary hack, we should teach 112 - * posix-cpu-timers to handle this case correctly. 113 - */ 114 - if (unlikely(has_group_leader_pid(tsk))) 115 - posix_cpu_timers_exit_group(tsk); 116 - } 117 108 #endif 118 109 119 110 if (group_dead) {
+9
kernel/time/clocksource.c
··· 928 928 929 929 clocksource_arch_init(cs); 930 930 931 + #ifdef CONFIG_GENERIC_VDSO_CLOCK_MODE 932 + if (cs->vdso_clock_mode < 0 || 933 + cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) { 934 + pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n", 935 + cs->name, cs->vdso_clock_mode); 936 + cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE; 937 + } 938 + #endif 939 + 931 940 /* Initialize mult/shift and max_idle_ns */ 932 941 __clocksource_update_freq_scale(cs, scale, freq); 933 942
+1 -1
kernel/time/hrtimer.c
··· 311 311 div >>= 1; 312 312 } 313 313 tmp >>= sft; 314 - do_div(tmp, (unsigned long) div); 314 + do_div(tmp, (u32) div); 315 315 return dclc < 0 ? -tmp : tmp; 316 316 } 317 317 EXPORT_SYMBOL_GPL(__ktime_divns);
+4 -3
kernel/time/namespace.c
··· 8 8 #include <linux/user_namespace.h> 9 9 #include <linux/sched/signal.h> 10 10 #include <linux/sched/task.h> 11 + #include <linux/clocksource.h> 11 12 #include <linux/seq_file.h> 12 13 #include <linux/proc_ns.h> 13 14 #include <linux/export.h> ··· 173 172 * for vdso_data->clock_mode is a non-issue. The task is spin waiting for the 174 173 * update to finish and for 'seq' to become even anyway. 175 174 * 176 - * Timens page has vdso_data->clock_mode set to VCLOCK_TIMENS which enforces 177 - * the time namespace handling path. 175 + * Timens page has vdso_data->clock_mode set to VDSO_CLOCKMODE_TIMENS which 176 + * enforces the time namespace handling path. 178 177 */ 179 178 static void timens_setup_vdso_data(struct vdso_data *vdata, 180 179 struct time_namespace *ns) ··· 184 183 struct timens_offset boottime = offset_from_ts(ns->offsets.boottime); 185 184 186 185 vdata->seq = 1; 187 - vdata->clock_mode = VCLOCK_TIMENS; 186 + vdata->clock_mode = VDSO_CLOCKMODE_TIMENS; 188 187 offset[CLOCK_MONOTONIC] = monotonic; 189 188 offset[CLOCK_MONOTONIC_RAW] = monotonic; 190 189 offset[CLOCK_MONOTONIC_COARSE] = monotonic;
+70 -78
kernel/time/posix-cpu-timers.c
··· 118 118 return __get_task_for_clock(clock, false, false) ? 0 : -EINVAL; 119 119 } 120 120 121 + static inline enum pid_type cpu_timer_pid_type(struct k_itimer *timer) 122 + { 123 + return CPUCLOCK_PERTHREAD(timer->it_clock) ? PIDTYPE_PID : PIDTYPE_TGID; 124 + } 125 + 126 + static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer) 127 + { 128 + return pid_task(timer->it.cpu.pid, cpu_timer_pid_type(timer)); 129 + } 130 + 121 131 /* 122 132 * Update expiry time from increment, and increase overrun count, 123 133 * given the current clock sample. ··· 346 336 /* 347 337 * Sample a process (thread group) clock for the given task clkid. If the 348 338 * group's cputime accounting is already enabled, read the atomic 349 - * store. Otherwise a full update is required. Task's sighand lock must be 350 - * held to protect the task traversal on a full update. clkid is already 351 - * validated. 339 + * store. Otherwise a full update is required. clkid is already validated. 352 340 */ 353 341 static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p, 354 342 bool start) ··· 401 393 402 394 new_timer->kclock = &clock_posix_cpu; 403 395 timerqueue_init(&new_timer->it.cpu.node); 404 - new_timer->it.cpu.task = p; 396 + new_timer->it.cpu.pid = get_task_pid(p, cpu_timer_pid_type(new_timer)); 397 + /* 398 + * get_task_for_clock() took a reference on @p. Drop it as the timer 399 + * holds a reference on the pid of @p. 
400 + */ 401 + put_task_struct(p); 405 402 return 0; 406 403 } 407 404 ··· 419 406 static int posix_cpu_timer_del(struct k_itimer *timer) 420 407 { 421 408 struct cpu_timer *ctmr = &timer->it.cpu; 422 - struct task_struct *p = ctmr->task; 423 409 struct sighand_struct *sighand; 410 + struct task_struct *p; 424 411 unsigned long flags; 425 412 int ret = 0; 426 413 427 - if (WARN_ON_ONCE(!p)) 428 - return -EINVAL; 414 + rcu_read_lock(); 415 + p = cpu_timer_task_rcu(timer); 416 + if (!p) 417 + goto out; 429 418 430 419 /* 431 420 * Protect against sighand release/switch in exit/exec and process/ ··· 449 434 unlock_task_sighand(p, &flags); 450 435 } 451 436 437 + out: 438 + rcu_read_unlock(); 452 439 if (!ret) 453 - put_task_struct(p); 440 + put_pid(ctmr->pid); 454 441 455 442 return ret; 456 443 } ··· 501 484 * Insert the timer on the appropriate list before any timers that 502 485 * expire later. This must be called with the sighand lock held. 503 486 */ 504 - static void arm_timer(struct k_itimer *timer) 487 + static void arm_timer(struct k_itimer *timer, struct task_struct *p) 505 488 { 506 489 int clkidx = CPUCLOCK_WHICH(timer->it_clock); 507 490 struct cpu_timer *ctmr = &timer->it.cpu; 508 491 u64 newexp = cpu_timer_getexpires(ctmr); 509 - struct task_struct *p = ctmr->task; 510 492 struct posix_cputimer_base *base; 511 493 512 494 if (CPUCLOCK_PERTHREAD(timer->it_clock)) ··· 580 564 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock); 581 565 u64 old_expires, new_expires, old_incr, val; 582 566 struct cpu_timer *ctmr = &timer->it.cpu; 583 - struct task_struct *p = ctmr->task; 584 567 struct sighand_struct *sighand; 568 + struct task_struct *p; 585 569 unsigned long flags; 586 570 int ret = 0; 587 571 588 - if (WARN_ON_ONCE(!p)) 589 - return -EINVAL; 572 + rcu_read_lock(); 573 + p = cpu_timer_task_rcu(timer); 574 + if (!p) { 575 + /* 576 + * If p has just been reaped, we can no 577 + * longer get any information about it at all. 
578 + */ 579 + rcu_read_unlock(); 580 + return -ESRCH; 581 + } 590 582 591 583 /* 592 584 * Use the to_ktime conversion because that clamps the maximum ··· 611 587 * If p has just been reaped, we can no 612 588 * longer get any information about it at all. 613 589 */ 614 - if (unlikely(sighand == NULL)) 590 + if (unlikely(sighand == NULL)) { 591 + rcu_read_unlock(); 615 592 return -ESRCH; 593 + } 616 594 617 595 /* 618 596 * Disarm any old timer after extracting its expiry time. ··· 688 662 */ 689 663 cpu_timer_setexpires(ctmr, new_expires); 690 664 if (new_expires != 0 && val < new_expires) { 691 - arm_timer(timer); 665 + arm_timer(timer, p); 692 666 } 693 667 694 668 unlock_task_sighand(p, &flags); ··· 719 693 720 694 ret = 0; 721 695 out: 696 + rcu_read_unlock(); 722 697 if (old) 723 698 old->it_interval = ns_to_timespec64(old_incr); 724 699 ··· 731 704 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock); 732 705 struct cpu_timer *ctmr = &timer->it.cpu; 733 706 u64 now, expires = cpu_timer_getexpires(ctmr); 734 - struct task_struct *p = ctmr->task; 707 + struct task_struct *p; 735 708 736 - if (WARN_ON_ONCE(!p)) 737 - return; 709 + rcu_read_lock(); 710 + p = cpu_timer_task_rcu(timer); 711 + if (!p) 712 + goto out; 738 713 739 714 /* 740 715 * Easy part: convert the reload time. ··· 744 715 itp->it_interval = ktime_to_timespec64(timer->it_interval); 745 716 746 717 if (!expires) 747 - return; 718 + goto out; 748 719 749 720 /* 750 721 * Sample the clock to take the difference with the expiry time. 751 722 */ 752 - if (CPUCLOCK_PERTHREAD(timer->it_clock)) { 723 + if (CPUCLOCK_PERTHREAD(timer->it_clock)) 753 724 now = cpu_clock_sample(clkid, p); 754 - } else { 755 - struct sighand_struct *sighand; 756 - unsigned long flags; 757 - 758 - /* 759 - * Protect against sighand release/switch in exit/exec and 760 - * also make timer sampling safe if it ends up calling 761 - * thread_group_cputime(). 
762 - */ 763 - sighand = lock_task_sighand(p, &flags); 764 - if (unlikely(sighand == NULL)) { 765 - /* 766 - * The process has been reaped. 767 - * We can't even collect a sample any more. 768 - * Disarm the timer, nothing else to do. 769 - */ 770 - cpu_timer_setexpires(ctmr, 0); 771 - return; 772 - } else { 773 - now = cpu_clock_sample_group(clkid, p, false); 774 - unlock_task_sighand(p, &flags); 775 - } 776 - } 725 + else 726 + now = cpu_clock_sample_group(clkid, p, false); 777 727 778 728 if (now < expires) { 779 729 itp->it_value = ns_to_timespec64(expires - now); ··· 764 756 itp->it_value.tv_nsec = 1; 765 757 itp->it_value.tv_sec = 0; 766 758 } 759 + out: 760 + rcu_read_unlock(); 767 761 } 768 762 769 763 #define MAX_COLLECTED 20 ··· 986 976 static void posix_cpu_timer_rearm(struct k_itimer *timer) 987 977 { 988 978 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock); 989 - struct cpu_timer *ctmr = &timer->it.cpu; 990 - struct task_struct *p = ctmr->task; 979 + struct task_struct *p; 991 980 struct sighand_struct *sighand; 992 981 unsigned long flags; 993 982 u64 now; 994 983 995 - if (WARN_ON_ONCE(!p)) 996 - return; 984 + rcu_read_lock(); 985 + p = cpu_timer_task_rcu(timer); 986 + if (!p) 987 + goto out; 997 988 998 989 /* 999 990 * Fetch the current sample and update the timer's expiry time. 1000 991 */ 1001 - if (CPUCLOCK_PERTHREAD(timer->it_clock)) { 992 + if (CPUCLOCK_PERTHREAD(timer->it_clock)) 1002 993 now = cpu_clock_sample(clkid, p); 1003 - bump_cpu_timer(timer, now); 1004 - if (unlikely(p->exit_state)) 1005 - return; 1006 - 1007 - /* Protect timer list r/w in arm_timer() */ 1008 - sighand = lock_task_sighand(p, &flags); 1009 - if (!sighand) 1010 - return; 1011 - } else { 1012 - /* 1013 - * Protect arm_timer() and timer sampling in case of call to 1014 - * thread_group_cputime(). 1015 - */ 1016 - sighand = lock_task_sighand(p, &flags); 1017 - if (unlikely(sighand == NULL)) { 1018 - /* 1019 - * The process has been reaped. 
1020 - * We can't even collect a sample any more. 1021 - */ 1022 - cpu_timer_setexpires(ctmr, 0); 1023 - return; 1024 - } else if (unlikely(p->exit_state) && thread_group_empty(p)) { 1025 - /* If the process is dying, no need to rearm */ 1026 - goto unlock; 1027 - } 994 + else 1028 995 now = cpu_clock_sample_group(clkid, p, true); 1029 - bump_cpu_timer(timer, now); 1030 - /* Leave the sighand locked for the call below. */ 1031 - } 996 + 997 + bump_cpu_timer(timer, now); 998 + 999 + /* Protect timer list r/w in arm_timer() */ 1000 + sighand = lock_task_sighand(p, &flags); 1001 + if (unlikely(sighand == NULL)) 1002 + goto out; 1032 1003 1033 1004 /* 1034 1005 * Now re-arm for the new expiry time. 1035 1006 */ 1036 - arm_timer(timer); 1037 - unlock: 1007 + arm_timer(timer, p); 1038 1008 unlock_task_sighand(p, &flags); 1009 + out: 1010 + rcu_read_unlock(); 1039 1011 } 1040 1012 1041 1013 /**
+2 -1
kernel/time/posix-timers.c
··· 121 121 { 122 122 struct k_itimer *timer; 123 123 124 - hlist_for_each_entry_rcu(timer, head, t_hash) { 124 + hlist_for_each_entry_rcu(timer, head, t_hash, 125 + lockdep_is_held(&hash_lock)) { 125 126 if ((timer->it_signal == sig) && (timer->it_id == id)) 126 127 return timer; 127 128 }
+5 -4
kernel/time/sched_clock.c
··· 208 208 209 209 if (sched_clock_timer.function != NULL) { 210 210 /* update timeout for clock wrap */ 211 - hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); 211 + hrtimer_start(&sched_clock_timer, cd.wrap_kt, 212 + HRTIMER_MODE_REL_HARD); 212 213 } 213 214 214 215 r = rate; ··· 255 254 * Start the timer to keep sched_clock() properly updated and 256 255 * sets the initial epoch. 257 256 */ 258 - hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 257 + hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); 259 258 sched_clock_timer.function = sched_clock_poll; 260 - hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); 259 + hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD); 261 260 } 262 261 263 262 /* ··· 294 293 struct clock_read_data *rd = &cd.read_data[0]; 295 294 296 295 rd->epoch_cyc = cd.actual_read_sched_clock(); 297 - hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); 296 + hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD); 298 297 rd->read_sched_clock = cd.actual_read_sched_clock; 299 298 } 300 299
+1 -2
kernel/time/timekeeping.c
··· 1005 1005 ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem))) 1006 1006 return -EOVERFLOW; 1007 1007 tmp *= mult; 1008 - rem *= mult; 1009 1008 1010 - do_div(rem, div); 1009 + rem = div64_u64(rem * mult, div); 1011 1010 *base = tmp + rem; 1012 1011 return 0; 1013 1012 }
+9 -7
kernel/time/timer.c
··· 1829 1829 * schedule_timeout - sleep until timeout 1830 1830 * @timeout: timeout value in jiffies 1831 1831 * 1832 - * Make the current task sleep until @timeout jiffies have 1833 - * elapsed. The routine will return immediately unless 1834 - * the current task state has been set (see set_current_state()). 1832 + * Make the current task sleep until @timeout jiffies have elapsed. 1833 + * The function behavior depends on the current task state 1834 + * (see also set_current_state() description): 1835 1835 * 1836 - * You can set the task state as follows - 1836 + * %TASK_RUNNING - the scheduler is called, but the task does not sleep 1837 + * at all. That happens because sched_submit_work() does nothing for 1838 + * tasks in %TASK_RUNNING state. 1837 1839 * 1838 1840 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to 1839 1841 * pass before the routine returns unless the current task is explicitly 1840 - * woken up, (e.g. by wake_up_process())". 1842 + * woken up, (e.g. by wake_up_process()). 1841 1843 * 1842 1844 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is 1843 1845 * delivered to the current task or the current task is explicitly woken 1844 1846 * up. 1845 1847 * 1846 - * The current task state is guaranteed to be TASK_RUNNING when this 1848 + * The current task state is guaranteed to be %TASK_RUNNING when this 1847 1849 * routine returns. 1848 1850 * 1849 1851 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule ··· 1853 1851 * value will be %MAX_SCHEDULE_TIMEOUT. 1854 1852 * 1855 1853 * Returns 0 when the timer has expired otherwise the remaining time in 1856 - * jiffies will be returned. In all cases the return value is guaranteed 1854 + * jiffies will be returned. In all cases the return value is guaranteed 1857 1855 * to be non-negative. 1858 1856 */ 1859 1857 signed long __sched schedule_timeout(signed long timeout)
+7 -5
kernel/time/vsyscall.c
··· 71 71 { 72 72 struct vdso_data *vdata = __arch_get_k_vdso_data(); 73 73 struct vdso_timestamp *vdso_ts; 74 + s32 clock_mode; 74 75 u64 nsec; 75 76 76 77 /* copy vsyscall data */ 77 78 vdso_write_begin(vdata); 78 79 79 - vdata[CS_HRES_COARSE].clock_mode = __arch_get_clock_mode(tk); 80 - vdata[CS_RAW].clock_mode = __arch_get_clock_mode(tk); 80 + clock_mode = tk->tkr_mono.clock->vdso_clock_mode; 81 + vdata[CS_HRES_COARSE].clock_mode = clock_mode; 82 + vdata[CS_RAW].clock_mode = clock_mode; 81 83 82 84 /* CLOCK_REALTIME also required for time() */ 83 85 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME]; ··· 105 103 WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution); 106 104 107 105 /* 108 - * Architectures can opt out of updating the high resolution part 109 - * of the VDSO. 106 + * If the current clocksource is not VDSO capable, then spare the 107 + * update of the high reolution parts. 110 108 */ 111 - if (__arch_update_vdso_data()) 109 + if (clock_mode != VDSO_CLOCKMODE_NONE) 112 110 update_vdso_data(vdata, tk); 113 111 114 112 __arch_update_vsyscall(vdata, tk);
+101 -54
lib/vdso/gettimeofday.c
··· 2 2 /* 3 3 * Generic userspace implementations of gettimeofday() and similar. 4 4 */ 5 - #include <linux/compiler.h> 6 - #include <linux/math64.h> 7 - #include <linux/time.h> 8 - #include <linux/kernel.h> 9 - #include <linux/hrtimer_defs.h> 10 5 #include <vdso/datapage.h> 11 6 #include <vdso/helpers.h> 12 - 13 - /* 14 - * The generic vDSO implementation requires that gettimeofday.h 15 - * provides: 16 - * - __arch_get_vdso_data(): to get the vdso datapage. 17 - * - __arch_get_hw_counter(): to get the hw counter based on the 18 - * clock_mode. 19 - * - gettimeofday_fallback(): fallback for gettimeofday. 20 - * - clock_gettime_fallback(): fallback for clock_gettime. 21 - * - clock_getres_fallback(): fallback for clock_getres. 22 - */ 23 - #ifdef ENABLE_COMPAT_VDSO 24 - #include <asm/vdso/compat_gettimeofday.h> 25 - #else 26 - #include <asm/vdso/gettimeofday.h> 27 - #endif /* ENABLE_COMPAT_VDSO */ 28 7 29 8 #ifndef vdso_calc_delta 30 9 /* ··· 14 35 u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult) 15 36 { 16 37 return ((cycles - last) & mask) * mult; 38 + } 39 + #endif 40 + 41 + #ifndef vdso_shift_ns 42 + static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift) 43 + { 44 + return ns >> shift; 45 + } 46 + #endif 47 + 48 + #ifndef __arch_vdso_hres_capable 49 + static inline bool __arch_vdso_hres_capable(void) 50 + { 51 + return true; 52 + } 53 + #endif 54 + 55 + #ifndef vdso_clocksource_ok 56 + static inline bool vdso_clocksource_ok(const struct vdso_data *vd) 57 + { 58 + return vd->clock_mode != VDSO_CLOCKMODE_NONE; 17 59 } 18 60 #endif 19 61 ··· 57 57 58 58 do { 59 59 seq = vdso_read_begin(vd); 60 + 61 + if (unlikely(!vdso_clocksource_ok(vd))) 62 + return -1; 63 + 60 64 cycles = __arch_get_hw_counter(vd->clock_mode); 61 65 ns = vdso_ts->nsec; 62 66 last = vd->cycle_last; 63 - if (unlikely((s64)cycles < 0)) 64 - return -1; 65 - 66 67 ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); 67 - ns >>= vd->shift; 68 + ns = vdso_shift_ns(ns, 
vd->shift); 68 69 sec = vdso_ts->sec; 69 70 } while (unlikely(vdso_read_retry(vd, seq))); 70 71 ··· 102 101 u64 cycles, last, sec, ns; 103 102 u32 seq; 104 103 104 + /* Allows to compile the high resolution parts out */ 105 + if (!__arch_vdso_hres_capable()) 106 + return -1; 107 + 105 108 do { 106 109 /* 107 - * Open coded to handle VCLOCK_TIMENS. Time namespace 110 + * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace 108 111 * enabled tasks have a special VVAR page installed which 109 112 * has vd->seq set to 1 and vd->clock_mode set to 110 - * VCLOCK_TIMENS. For non time namespace affected tasks 113 + * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks 111 114 * this does not affect performance because if vd->seq is 112 115 * odd, i.e. a concurrent update is in progress the extra 113 116 * check for vd->clock_mode is just a few extra ··· 120 115 */ 121 116 while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) { 122 117 if (IS_ENABLED(CONFIG_TIME_NS) && 123 - vd->clock_mode == VCLOCK_TIMENS) 118 + vd->clock_mode == VDSO_CLOCKMODE_TIMENS) 124 119 return do_hres_timens(vd, clk, ts); 125 120 cpu_relax(); 126 121 } 127 122 smp_rmb(); 128 123 124 + if (unlikely(!vdso_clocksource_ok(vd))) 125 + return -1; 126 + 129 127 cycles = __arch_get_hw_counter(vd->clock_mode); 130 128 ns = vdso_ts->nsec; 131 129 last = vd->cycle_last; 132 - if (unlikely((s64)cycles < 0)) 133 - return -1; 134 - 135 130 ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); 136 - ns >>= vd->shift; 131 + ns = vdso_shift_ns(ns, vd->shift); 137 132 sec = vdso_ts->sec; 138 133 } while (unlikely(vdso_read_retry(vd, seq))); 139 134 ··· 192 187 193 188 do { 194 189 /* 195 - * Open coded to handle VCLOCK_TIMENS. See comment in 190 + * Open coded to handle VDSO_CLOCK_TIMENS. See comment in 196 191 * do_hres(). 
197 192 */ 198 193 while ((seq = READ_ONCE(vd->seq)) & 1) { 199 194 if (IS_ENABLED(CONFIG_TIME_NS) && 200 - vd->clock_mode == VCLOCK_TIMENS) 195 + vd->clock_mode == VDSO_CLOCKMODE_TIMENS) 201 196 return do_coarse_timens(vd, clk, ts); 202 197 cpu_relax(); 203 198 } ··· 211 206 } 212 207 213 208 static __maybe_unused int 214 - __cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts) 209 + __cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock, 210 + struct __kernel_timespec *ts) 215 211 { 216 - const struct vdso_data *vd = __arch_get_vdso_data(); 217 212 u32 msk; 218 213 219 214 /* Check for negative values or invalid clocks */ ··· 238 233 } 239 234 240 235 static __maybe_unused int 241 - __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) 236 + __cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock, 237 + struct __kernel_timespec *ts) 242 238 { 243 - int ret = __cvdso_clock_gettime_common(clock, ts); 239 + int ret = __cvdso_clock_gettime_common(vd, clock, ts); 244 240 245 241 if (unlikely(ret)) 246 242 return clock_gettime_fallback(clock, ts); 247 243 return 0; 248 244 } 249 245 246 + static __maybe_unused int 247 + __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) 248 + { 249 + return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts); 250 + } 251 + 250 252 #ifdef BUILD_VDSO32 251 253 static __maybe_unused int 252 - __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res) 254 + __cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock, 255 + struct old_timespec32 *res) 253 256 { 254 257 struct __kernel_timespec ts; 255 258 int ret; 256 259 257 - ret = __cvdso_clock_gettime_common(clock, &ts); 260 + ret = __cvdso_clock_gettime_common(vd, clock, &ts); 258 261 259 262 if (unlikely(ret)) 260 263 return clock_gettime32_fallback(clock, res); ··· 273 260 274 261 return ret; 275 262 } 263 + 264 + static __maybe_unused int 265 + 
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res) 266 + { 267 + return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res); 268 + } 276 269 #endif /* BUILD_VDSO32 */ 277 270 278 271 static __maybe_unused int 279 - __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) 272 + __cvdso_gettimeofday_data(const struct vdso_data *vd, 273 + struct __kernel_old_timeval *tv, struct timezone *tz) 280 274 { 281 - const struct vdso_data *vd = __arch_get_vdso_data(); 282 275 283 276 if (likely(tv != NULL)) { 284 277 struct __kernel_timespec ts; ··· 298 279 299 280 if (unlikely(tz != NULL)) { 300 281 if (IS_ENABLED(CONFIG_TIME_NS) && 301 - vd->clock_mode == VCLOCK_TIMENS) 282 + vd->clock_mode == VDSO_CLOCKMODE_TIMENS) 302 283 vd = __arch_get_timens_vdso_data(); 303 284 304 285 tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest; ··· 308 289 return 0; 309 290 } 310 291 311 - #ifdef VDSO_HAS_TIME 312 - static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time) 292 + static __maybe_unused int 293 + __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) 313 294 { 314 - const struct vdso_data *vd = __arch_get_vdso_data(); 295 + return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz); 296 + } 297 + 298 + #ifdef VDSO_HAS_TIME 299 + static __maybe_unused __kernel_old_time_t 300 + __cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time) 301 + { 315 302 __kernel_old_time_t t; 316 303 317 - if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS) 304 + if (IS_ENABLED(CONFIG_TIME_NS) && 305 + vd->clock_mode == VDSO_CLOCKMODE_TIMENS) 318 306 vd = __arch_get_timens_vdso_data(); 319 307 320 308 t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec); ··· 331 305 332 306 return t; 333 307 } 308 + 309 + static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time) 310 + { 311 + return __cvdso_time_data(__arch_get_vdso_data(), time); 312 + 
} 334 313 #endif /* VDSO_HAS_TIME */ 335 314 336 315 #ifdef VDSO_HAS_CLOCK_GETRES 337 316 static __maybe_unused 338 - int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res) 317 + int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock, 318 + struct __kernel_timespec *res) 339 319 { 340 - const struct vdso_data *vd = __arch_get_vdso_data(); 341 320 u32 msk; 342 321 u64 ns; 343 322 ··· 350 319 if (unlikely((u32) clock >= MAX_CLOCKS)) 351 320 return -1; 352 321 353 - if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS) 322 + if (IS_ENABLED(CONFIG_TIME_NS) && 323 + vd->clock_mode == VDSO_CLOCKMODE_TIMENS) 354 324 vd = __arch_get_timens_vdso_data(); 355 325 356 326 /* ··· 381 349 } 382 350 383 351 static __maybe_unused 384 - int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res) 352 + int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock, 353 + struct __kernel_timespec *res) 385 354 { 386 - int ret = __cvdso_clock_getres_common(clock, res); 355 + int ret = __cvdso_clock_getres_common(vd, clock, res); 387 356 388 357 if (unlikely(ret)) 389 358 return clock_getres_fallback(clock, res); 390 359 return 0; 391 360 } 392 361 362 + static __maybe_unused 363 + int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res) 364 + { 365 + return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res); 366 + } 367 + 393 368 #ifdef BUILD_VDSO32 394 369 static __maybe_unused int 395 - __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res) 370 + __cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock, 371 + struct old_timespec32 *res) 396 372 { 397 373 struct __kernel_timespec ts; 398 374 int ret; 399 375 400 - ret = __cvdso_clock_getres_common(clock, &ts); 376 + ret = __cvdso_clock_getres_common(vd, clock, &ts); 401 377 402 378 if (unlikely(ret)) 403 379 return clock_getres32_fallback(clock, res); ··· 415 375 res->tv_nsec = 
ts.tv_nsec; 416 376 } 417 377 return ret; 378 + } 379 + 380 + static __maybe_unused int 381 + __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res) 382 + { 383 + return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(), 384 + clock, res); 418 385 } 419 386 #endif /* BUILD_VDSO32 */ 420 387 #endif /* VDSO_HAS_CLOCK_GETRES */
+5 -1
scripts/mod/modpost.c
··· 2252 2252 **/ 2253 2253 static void add_header(struct buffer *b, struct module *mod) 2254 2254 { 2255 - buf_printf(b, "#include <linux/build-salt.h>\n"); 2256 2255 buf_printf(b, "#include <linux/module.h>\n"); 2256 + /* 2257 + * Include build-salt.h after module.h in order to 2258 + * inherit the definitions. 2259 + */ 2260 + buf_printf(b, "#include <linux/build-salt.h>\n"); 2257 2261 buf_printf(b, "#include <linux/vermagic.h>\n"); 2258 2262 buf_printf(b, "#include <linux/compiler.h>\n"); 2259 2263 buf_printf(b, "\n");