Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Topics: kernel, os, linux
1 fork

Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer type cleanups from Thomas Gleixner:
"This series does a tree wide cleanup of types related to
timers/timekeeping.

- Get rid of cycle_t and use a plain u64. The type is not really
helpful and caused more confusion than clarity

- Get rid of the ktime union. The union has become useless as we use
the scalar nanoseconds storage unconditionally now. The 32bit
timespec alike storage got removed due to the Y2038 limitations
some time ago.

That leaves the odd union access around for no reason. Clean it up.

Both changes have been done with coccinelle and a small amount of
manual mopping up"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
ktime: Get rid of ktime_equal()
ktime: Cleanup ktime_set() usage
ktime: Get rid of the union
clocksource: Use a plain u64 instead of cycle_t

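For readers skimming the diff below, the two conversions have a simple shape. The following is a stand-alone sketch, not kernel code: the u64/s64 typedefs, read_counter() and the simplified ktime_set() are illustrative stand-ins for the kernel's clocksource read hook and ktime helpers, showing why a bare u64 return value and a scalar nanosecond assignment make the old cycle_t spelling and the ktime_set(0, nsec) wrapper redundant.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;           /* kernel-style fixed-width aliases */
typedef int64_t  s64;

/* A clocksource read hook now returns a plain u64; the old cycle_t
 * typedef was just u64 under a different name. */
static u64 read_counter(void)
{
	static u64 fake_hw_counter;
	return ++fake_hw_counter;   /* stand-in for reading a hardware counter */
}

/* With the union gone, ktime_t is a bare s64 nanosecond count, so
 * ktime_set(0, nsec) reduces to the nanosecond value itself. */
typedef s64 ktime_t;

static ktime_t ktime_set(s64 secs, unsigned long nsecs)
{
	return secs * 1000000000LL + (s64)nsecs;
}

int main(void)
{
	u64 cycles = read_counter();
	ktime_t before = ktime_set(0, 500000);  /* old spelling */
	ktime_t after  = 500000;                /* new spelling: assign ns directly */

	printf("cycles=%llu before=%lld after=%lld\n",
	       (unsigned long long)cycles, (long long)before, (long long)after);
	return before == after ? 0 : 1;
}

Both spellings yield the same nanosecond value, which is why the bulk of the conversion in the diff below could be done mechanically with coccinelle.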
+604 -665
+2 -2
arch/alpha/kernel/time.c
··· 133 133 * The QEMU clock as a clocksource primitive. 134 134 */ 135 135 136 - static cycle_t 136 + static u64 137 137 qemu_cs_read(struct clocksource *cs) 138 138 { 139 139 return qemu_get_vmtime(); ··· 260 260 * use this method when WTINT is in use. 261 261 */ 262 262 263 - static cycle_t read_rpcc(struct clocksource *cs) 263 + static u64 read_rpcc(struct clocksource *cs) 264 264 { 265 265 return rpcc(); 266 266 }
+1 -1
arch/arm/mach-davinci/time.c
··· 268 268 /* 269 269 * clocksource 270 270 */ 271 - static cycle_t read_cycles(struct clocksource *cs) 271 + static u64 read_cycles(struct clocksource *cs) 272 272 { 273 273 struct timer_s *t = &timers[TID_CLOCKSOURCE]; 274 274
+2 -2
arch/arm/mach-ep93xx/timer-ep93xx.c
··· 59 59 return ret; 60 60 } 61 61 62 - cycle_t ep93xx_clocksource_read(struct clocksource *c) 62 + u64 ep93xx_clocksource_read(struct clocksource *c) 63 63 { 64 64 u64 ret; 65 65 66 66 ret = readl(EP93XX_TIMER4_VALUE_LOW); 67 67 ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32); 68 - return (cycle_t) ret; 68 + return (u64) ret; 69 69 } 70 70 71 71 static int ep93xx_clkevt_set_next_event(unsigned long next,
+1 -1
arch/arm/mach-footbridge/dc21285-timer.c
··· 19 19 20 20 #include "common.h" 21 21 22 - static cycle_t cksrc_dc21285_read(struct clocksource *cs) 22 + static u64 cksrc_dc21285_read(struct clocksource *cs) 23 23 { 24 24 return cs->mask - *CSR_TIMER2_VALUE; 25 25 }
+1 -1
arch/arm/mach-ixp4xx/common.c
··· 493 493 * clocksource 494 494 */ 495 495 496 - static cycle_t ixp4xx_clocksource_read(struct clocksource *c) 496 + static u64 ixp4xx_clocksource_read(struct clocksource *c) 497 497 { 498 498 return *IXP4XX_OSTS; 499 499 }
+1 -1
arch/arm/mach-mmp/time.c
··· 144 144 .set_state_oneshot = timer_set_shutdown, 145 145 }; 146 146 147 - static cycle_t clksrc_read(struct clocksource *cs) 147 + static u64 clksrc_read(struct clocksource *cs) 148 148 { 149 149 return timer_read(); 150 150 }
+2 -2
arch/arm/mach-omap2/timer.c
··· 369 369 /* 370 370 * clocksource 371 371 */ 372 - static cycle_t clocksource_read_cycles(struct clocksource *cs) 372 + static u64 clocksource_read_cycles(struct clocksource *cs) 373 373 { 374 - return (cycle_t)__omap_dm_timer_read_counter(&clksrc, 374 + return (u64)__omap_dm_timer_read_counter(&clksrc, 375 375 OMAP_TIMER_NONPOSTED); 376 376 } 377 377
+1 -1
arch/arm/plat-iop/time.c
··· 38 38 /* 39 39 * IOP clocksource (free-running timer 1). 40 40 */ 41 - static cycle_t notrace iop_clocksource_read(struct clocksource *unused) 41 + static u64 notrace iop_clocksource_read(struct clocksource *unused) 42 42 { 43 43 return 0xffffffffu - read_tcr1(); 44 44 }
+2 -2
arch/avr32/kernel/time.c
··· 20 20 21 21 static bool disable_cpu_idle_poll; 22 22 23 - static cycle_t read_cycle_count(struct clocksource *cs) 23 + static u64 read_cycle_count(struct clocksource *cs) 24 24 { 25 - return (cycle_t)sysreg_read(COUNT); 25 + return (u64)sysreg_read(COUNT); 26 26 } 27 27 28 28 /*
+2 -2
arch/blackfin/kernel/time-ts.c
··· 26 26 27 27 #if defined(CONFIG_CYCLES_CLOCKSOURCE) 28 28 29 - static notrace cycle_t bfin_read_cycles(struct clocksource *cs) 29 + static notrace u64 bfin_read_cycles(struct clocksource *cs) 30 30 { 31 31 #ifdef CONFIG_CPU_FREQ 32 32 return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); ··· 80 80 enable_gptimers(TIMER0bit); 81 81 } 82 82 83 - static cycle_t bfin_read_gptimer0(struct clocksource *cs) 83 + static u64 bfin_read_gptimer0(struct clocksource *cs) 84 84 { 85 85 return bfin_read_TIMER0_COUNTER(); 86 86 }
+1 -1
arch/c6x/kernel/time.c
··· 26 26 static u32 sched_clock_multiplier; 27 27 #define SCHED_CLOCK_SHIFT 16 28 28 29 - static cycle_t tsc_read(struct clocksource *cs) 29 + static u64 tsc_read(struct clocksource *cs) 30 30 { 31 31 return get_cycles(); 32 32 }
+2 -2
arch/hexagon/kernel/time.c
··· 72 72 /* Look for "TCX0" for related constants. */ 73 73 static __iomem struct adsp_hw_timer_struct *rtos_timer; 74 74 75 - static cycle_t timer_get_cycles(struct clocksource *cs) 75 + static u64 timer_get_cycles(struct clocksource *cs) 76 76 { 77 - return (cycle_t) __vmgettime(); 77 + return (u64) __vmgettime(); 78 78 } 79 79 80 80 static struct clocksource hexagon_clocksource = {
+2 -2
arch/ia64/kernel/cyclone.c
··· 21 21 22 22 static void __iomem *cyclone_mc; 23 23 24 - static cycle_t read_cyclone(struct clocksource *cs) 24 + static u64 read_cyclone(struct clocksource *cs) 25 25 { 26 - return (cycle_t)readq((void __iomem *)cyclone_mc); 26 + return (u64)readq((void __iomem *)cyclone_mc); 27 27 } 28 28 29 29 static struct clocksource clocksource_cyclone = {
+3 -3
arch/ia64/kernel/fsyscall_gtod_data.h
··· 9 9 seqcount_t seq; 10 10 struct timespec wall_time; 11 11 struct timespec monotonic_time; 12 - cycle_t clk_mask; 12 + u64 clk_mask; 13 13 u32 clk_mult; 14 14 u32 clk_shift; 15 15 void *clk_fsys_mmio; 16 - cycle_t clk_cycle_last; 16 + u64 clk_cycle_last; 17 17 } ____cacheline_aligned; 18 18 19 19 struct itc_jitter_data_t { 20 20 int itc_jitter; 21 - cycle_t itc_lastcycle; 21 + u64 itc_lastcycle; 22 22 } ____cacheline_aligned; 23 23
+3 -3
arch/ia64/kernel/time.c
··· 31 31 32 32 #include "fsyscall_gtod_data.h" 33 33 34 - static cycle_t itc_get_cycles(struct clocksource *cs); 34 + static u64 itc_get_cycles(struct clocksource *cs); 35 35 36 36 struct fsyscall_gtod_data_t fsyscall_gtod_data; 37 37 ··· 323 323 } 324 324 } 325 325 326 - static cycle_t itc_get_cycles(struct clocksource *cs) 326 + static u64 itc_get_cycles(struct clocksource *cs) 327 327 { 328 328 unsigned long lcycle, now, ret; 329 329 ··· 397 397 } 398 398 399 399 void update_vsyscall_old(struct timespec *wall, struct timespec *wtm, 400 - struct clocksource *c, u32 mult, cycle_t cycle_last) 400 + struct clocksource *c, u32 mult, u64 cycle_last) 401 401 { 402 402 write_seqcount_begin(&fsyscall_gtod_data.seq); 403 403
+2 -2
arch/ia64/sn/kernel/sn2/timer.c
··· 22 22 23 23 extern unsigned long sn_rtc_cycles_per_second; 24 24 25 - static cycle_t read_sn2(struct clocksource *cs) 25 + static u64 read_sn2(struct clocksource *cs) 26 26 { 27 - return (cycle_t)readq(RTC_COUNTER_ADDR); 27 + return (u64)readq(RTC_COUNTER_ADDR); 28 28 } 29 29 30 30 static struct clocksource clocksource_sn2 = {
+1 -1
arch/m68k/68000/timers.c
··· 76 76 77 77 /***************************************************************************/ 78 78 79 - static cycle_t m68328_read_clk(struct clocksource *cs) 79 + static u64 m68328_read_clk(struct clocksource *cs) 80 80 { 81 81 unsigned long flags; 82 82 u32 cycles;
+1 -1
arch/m68k/coldfire/dma_timer.c
··· 34 34 #define DMA_DTMR_CLK_DIV_16 (2 << 1) 35 35 #define DMA_DTMR_ENABLE (1 << 0) 36 36 37 - static cycle_t cf_dt_get_cycles(struct clocksource *cs) 37 + static u64 cf_dt_get_cycles(struct clocksource *cs) 38 38 { 39 39 return __raw_readl(DTCN0); 40 40 }
+1 -1
arch/m68k/coldfire/pit.c
··· 118 118 119 119 /***************************************************************************/ 120 120 121 - static cycle_t pit_read_clk(struct clocksource *cs) 121 + static u64 pit_read_clk(struct clocksource *cs) 122 122 { 123 123 unsigned long flags; 124 124 u32 cycles;
+1 -1
arch/m68k/coldfire/sltimers.c
··· 97 97 .handler = mcfslt_tick, 98 98 }; 99 99 100 - static cycle_t mcfslt_read_clk(struct clocksource *cs) 100 + static u64 mcfslt_read_clk(struct clocksource *cs) 101 101 { 102 102 unsigned long flags; 103 103 u32 cycles, scnt;
+1 -1
arch/m68k/coldfire/timers.c
··· 89 89 90 90 /***************************************************************************/ 91 91 92 - static cycle_t mcftmr_read_clk(struct clocksource *cs) 92 + static u64 mcftmr_read_clk(struct clocksource *cs) 93 93 { 94 94 unsigned long flags; 95 95 u32 cycles;
+3 -3
arch/microblaze/kernel/timer.c
··· 190 190 return read_fn(timer_baseaddr + TCR1); 191 191 } 192 192 193 - static cycle_t xilinx_read(struct clocksource *cs) 193 + static u64 xilinx_read(struct clocksource *cs) 194 194 { 195 195 /* reading actual value of timer 1 */ 196 - return (cycle_t)xilinx_clock_read(); 196 + return (u64)xilinx_clock_read(); 197 197 } 198 198 199 199 static struct timecounter xilinx_tc = { 200 200 .cc = NULL, 201 201 }; 202 202 203 - static cycle_t xilinx_cc_read(const struct cyclecounter *cc) 203 + static u64 xilinx_cc_read(const struct cyclecounter *cc) 204 204 { 205 205 return xilinx_read(NULL); 206 206 }
+1 -1
arch/mips/alchemy/common/time.c
··· 44 44 /* 32kHz clock enabled and detected */ 45 45 #define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S) 46 46 47 - static cycle_t au1x_counter1_read(struct clocksource *cs) 47 + static u64 au1x_counter1_read(struct clocksource *cs) 48 48 { 49 49 return alchemy_rdsys(AU1000_SYS_RTCREAD); 50 50 }
+1 -1
arch/mips/cavium-octeon/csrc-octeon.c
··· 98 98 local_irq_restore(flags); 99 99 } 100 100 101 - static cycle_t octeon_cvmcount_read(struct clocksource *cs) 101 + static u64 octeon_cvmcount_read(struct clocksource *cs) 102 102 { 103 103 return read_c0_cvmcount(); 104 104 }
+1 -1
arch/mips/jz4740/time.c
··· 34 34 35 35 static uint16_t jz4740_jiffies_per_tick; 36 36 37 - static cycle_t jz4740_clocksource_read(struct clocksource *cs) 37 + static u64 jz4740_clocksource_read(struct clocksource *cs) 38 38 { 39 39 return jz4740_timer_get_count(TIMER_CLOCKSOURCE); 40 40 }
+1 -1
arch/mips/kernel/cevt-txx9.c
··· 27 27 struct txx9_tmr_reg __iomem *tmrptr; 28 28 }; 29 29 30 - static cycle_t txx9_cs_read(struct clocksource *cs) 30 + static u64 txx9_cs_read(struct clocksource *cs) 31 31 { 32 32 struct txx9_clocksource *txx9_cs = 33 33 container_of(cs, struct txx9_clocksource, cs);
+2 -2
arch/mips/kernel/csrc-bcm1480.c
··· 25 25 26 26 #include <asm/sibyte/sb1250.h> 27 27 28 - static cycle_t bcm1480_hpt_read(struct clocksource *cs) 28 + static u64 bcm1480_hpt_read(struct clocksource *cs) 29 29 { 30 - return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT)); 30 + return (u64) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT)); 31 31 } 32 32 33 33 struct clocksource bcm1480_clocksource = {
+1 -1
arch/mips/kernel/csrc-ioasic.c
··· 22 22 #include <asm/dec/ioasic.h> 23 23 #include <asm/dec/ioasic_addrs.h> 24 24 25 - static cycle_t dec_ioasic_hpt_read(struct clocksource *cs) 25 + static u64 dec_ioasic_hpt_read(struct clocksource *cs) 26 26 { 27 27 return ioasic_read(IO_REG_FCTR); 28 28 }
+1 -1
arch/mips/kernel/csrc-r4k.c
··· 11 11 12 12 #include <asm/time.h> 13 13 14 - static cycle_t c0_hpt_read(struct clocksource *cs) 14 + static u64 c0_hpt_read(struct clocksource *cs) 15 15 { 16 16 return read_c0_count(); 17 17 }
+2 -2
arch/mips/kernel/csrc-sb1250.c
··· 30 30 * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over 31 31 * again. 32 32 */ 33 - static inline cycle_t sb1250_hpt_get_cycles(void) 33 + static inline u64 sb1250_hpt_get_cycles(void) 34 34 { 35 35 unsigned int count; 36 36 void __iomem *addr; ··· 41 41 return SB1250_HPT_VALUE - count; 42 42 } 43 43 44 - static cycle_t sb1250_hpt_read(struct clocksource *cs) 44 + static u64 sb1250_hpt_read(struct clocksource *cs) 45 45 { 46 46 return sb1250_hpt_get_cycles(); 47 47 }
+2 -2
arch/mips/loongson32/common/time.c
··· 63 63 ls1x_pwmtimer_restart(); 64 64 } 65 65 66 - static cycle_t ls1x_clocksource_read(struct clocksource *cs) 66 + static u64 ls1x_clocksource_read(struct clocksource *cs) 67 67 { 68 68 unsigned long flags; 69 69 int count; ··· 107 107 108 108 raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags); 109 109 110 - return (cycle_t) (jifs * ls1x_jiffies_per_tick) + count; 110 + return (u64) (jifs * ls1x_jiffies_per_tick) + count; 111 111 } 112 112 113 113 static struct clocksource ls1x_clocksource = {
+2 -2
arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
··· 144 144 * to just read by itself. So use jiffies to emulate a free 145 145 * running counter: 146 146 */ 147 - static cycle_t mfgpt_read(struct clocksource *cs) 147 + static u64 mfgpt_read(struct clocksource *cs) 148 148 { 149 149 unsigned long flags; 150 150 int count; ··· 188 188 189 189 raw_spin_unlock_irqrestore(&mfgpt_lock, flags); 190 190 191 - return (cycle_t) (jifs * COMPARE) + count; 191 + return (u64) (jifs * COMPARE) + count; 192 192 } 193 193 194 194 static struct clocksource clocksource_mfgpt = {
+2 -2
arch/mips/loongson64/loongson-3/hpet.c
··· 248 248 pr_info("hpet clock event device register\n"); 249 249 } 250 250 251 - static cycle_t hpet_read_counter(struct clocksource *cs) 251 + static u64 hpet_read_counter(struct clocksource *cs) 252 252 { 253 - return (cycle_t)hpet_read(HPET_COUNTER); 253 + return (u64)hpet_read(HPET_COUNTER); 254 254 } 255 255 256 256 static void hpet_suspend(struct clocksource *cs)
+1 -1
arch/mips/mti-malta/malta-time.c
··· 75 75 unsigned int count, start; 76 76 unsigned char secs1, secs2, ctrl; 77 77 int secs; 78 - cycle_t giccount = 0, gicstart = 0; 78 + u64 giccount = 0, gicstart = 0; 79 79 80 80 #if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ 81 81 mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
+2 -2
arch/mips/netlogic/common/time.c
··· 59 59 return IRQ_TIMER; 60 60 } 61 61 62 - static cycle_t nlm_get_pic_timer(struct clocksource *cs) 62 + static u64 nlm_get_pic_timer(struct clocksource *cs) 63 63 { 64 64 uint64_t picbase = nlm_get_node(0)->picbase; 65 65 66 66 return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER); 67 67 } 68 68 69 - static cycle_t nlm_get_pic_timer32(struct clocksource *cs) 69 + static u64 nlm_get_pic_timer32(struct clocksource *cs) 70 70 { 71 71 uint64_t picbase = nlm_get_node(0)->picbase; 72 72
+1 -1
arch/mips/sgi-ip27/ip27-timer.c
··· 140 140 setup_irq(irq, &hub_rt_irqaction); 141 141 } 142 142 143 - static cycle_t hub_rt_read(struct clocksource *cs) 143 + static u64 hub_rt_read(struct clocksource *cs) 144 144 { 145 145 return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT); 146 146 }
+1 -1
arch/mn10300/kernel/csrc-mn10300.c
··· 13 13 #include <asm/timex.h> 14 14 #include "internal.h" 15 15 16 - static cycle_t mn10300_read(struct clocksource *cs) 16 + static u64 mn10300_read(struct clocksource *cs) 17 17 { 18 18 return read_timestamp_counter(); 19 19 }
+1 -1
arch/nios2/kernel/time.c
··· 81 81 return count; 82 82 } 83 83 84 - static cycle_t nios2_timer_read(struct clocksource *cs) 84 + static u64 nios2_timer_read(struct clocksource *cs) 85 85 { 86 86 struct nios2_clocksource *nios2_cs = to_nios2_clksource(cs); 87 87 unsigned long flags;
+2 -2
arch/openrisc/kernel/time.c
··· 117 117 * is 32 bits wide and runs at the CPU clock frequency. 118 118 */ 119 119 120 - static cycle_t openrisc_timer_read(struct clocksource *cs) 120 + static u64 openrisc_timer_read(struct clocksource *cs) 121 121 { 122 - return (cycle_t) mfspr(SPR_TTCR); 122 + return (u64) mfspr(SPR_TTCR); 123 123 } 124 124 125 125 static struct clocksource openrisc_timer = {
+1 -1
arch/parisc/kernel/time.c
··· 137 137 138 138 /* clock source code */ 139 139 140 - static cycle_t notrace read_cr16(struct clocksource *cs) 140 + static u64 notrace read_cr16(struct clocksource *cs) 141 141 { 142 142 return get_cycles(); 143 143 }
+7 -7
arch/powerpc/kernel/time.c
··· 80 80 #include <linux/clockchips.h> 81 81 #include <linux/timekeeper_internal.h> 82 82 83 - static cycle_t rtc_read(struct clocksource *); 83 + static u64 rtc_read(struct clocksource *); 84 84 static struct clocksource clocksource_rtc = { 85 85 .name = "rtc", 86 86 .rating = 400, ··· 89 89 .read = rtc_read, 90 90 }; 91 91 92 - static cycle_t timebase_read(struct clocksource *); 92 + static u64 timebase_read(struct clocksource *); 93 93 static struct clocksource clocksource_timebase = { 94 94 .name = "timebase", 95 95 .rating = 400, ··· 802 802 } 803 803 804 804 /* clocksource code */ 805 - static cycle_t rtc_read(struct clocksource *cs) 805 + static u64 rtc_read(struct clocksource *cs) 806 806 { 807 - return (cycle_t)get_rtc(); 807 + return (u64)get_rtc(); 808 808 } 809 809 810 - static cycle_t timebase_read(struct clocksource *cs) 810 + static u64 timebase_read(struct clocksource *cs) 811 811 { 812 - return (cycle_t)get_tb(); 812 + return (u64)get_tb(); 813 813 } 814 814 815 815 void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm, 816 - struct clocksource *clock, u32 mult, cycle_t cycle_last) 816 + struct clocksource *clock, u32 mult, u64 cycle_last) 817 817 { 818 818 u64 new_tb_to_xs, new_stamp_xsec; 819 819 u32 frac_sec;
+1 -2
arch/powerpc/kvm/book3s_hv.c
··· 1872 1872 } 1873 1873 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC 1874 1874 / tb_ticks_per_sec; 1875 - hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), 1876 - HRTIMER_MODE_REL); 1875 + hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); 1877 1876 vcpu->arch.timer_running = 1; 1878 1877 } 1879 1878
+2 -2
arch/powerpc/oprofile/cell/spu_profiler.c
··· 180 180 smp_wmb(); /* insure spu event buffer updates are written */ 181 181 /* don't want events intermingled... */ 182 182 183 - kt = ktime_set(0, profiling_interval); 183 + kt = profiling_interval; 184 184 if (!spu_prof_running) 185 185 goto stop; 186 186 hrtimer_forward(timer, timer->base->get_time(), kt); ··· 204 204 ktime_t kt; 205 205 206 206 pr_debug("timer resolution: %lu\n", TICK_NSEC); 207 - kt = ktime_set(0, profiling_interval); 207 + kt = profiling_interval; 208 208 hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 209 209 hrtimer_set_expires(&timer, kt); 210 210 timer.function = profile_spus;
+1 -1
arch/s390/kernel/time.c
··· 209 209 tod_to_timeval(clock - TOD_UNIX_EPOCH, ts); 210 210 } 211 211 212 - static cycle_t read_tod_clock(struct clocksource *cs) 212 + static u64 read_tod_clock(struct clocksource *cs) 213 213 { 214 214 unsigned long long now, adj; 215 215
+1 -1
arch/s390/kvm/interrupt.c
··· 1019 1019 return 0; 1020 1020 1021 1021 __set_cpu_idle(vcpu); 1022 - hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); 1022 + hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL); 1023 1023 VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime); 1024 1024 no_timer: 1025 1025 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+1 -1
arch/sparc/kernel/time_32.c
··· 148 148 return offset; 149 149 } 150 150 151 - static cycle_t timer_cs_read(struct clocksource *cs) 151 + static u64 timer_cs_read(struct clocksource *cs) 152 152 { 153 153 unsigned int seq, offset; 154 154 u64 cycles;
+1 -1
arch/sparc/kernel/time_64.c
··· 770 770 } 771 771 EXPORT_SYMBOL(udelay); 772 772 773 - static cycle_t clocksource_tick_read(struct clocksource *cs) 773 + static u64 clocksource_tick_read(struct clocksource *cs) 774 774 { 775 775 return tick_ops->get_tick(); 776 776 }
+1 -1
arch/um/kernel/time.c
··· 83 83 return IRQ_HANDLED; 84 84 } 85 85 86 - static cycle_t timer_read(struct clocksource *cs) 86 + static u64 timer_read(struct clocksource *cs) 87 87 { 88 88 return os_nsecs() / TIMER_MULTIPLIER; 89 89 }
+1 -1
arch/unicore32/kernel/time.c
··· 62 62 .set_state_oneshot = puv3_osmr0_shutdown, 63 63 }; 64 64 65 - static cycle_t puv3_read_oscr(struct clocksource *cs) 65 + static u64 puv3_read_oscr(struct clocksource *cs) 66 66 { 67 67 return readl(OST_OSCR); 68 68 }
+4 -4
arch/x86/entry/vdso/vclock_gettime.c
··· 92 92 return (const struct pvclock_vsyscall_time_info *)&pvclock_page; 93 93 } 94 94 95 - static notrace cycle_t vread_pvclock(int *mode) 95 + static notrace u64 vread_pvclock(int *mode) 96 96 { 97 97 const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti; 98 - cycle_t ret; 98 + u64 ret; 99 99 u64 last; 100 100 u32 version; 101 101 ··· 142 142 } 143 143 #endif 144 144 145 - notrace static cycle_t vread_tsc(void) 145 + notrace static u64 vread_tsc(void) 146 146 { 147 - cycle_t ret = (cycle_t)rdtsc_ordered(); 147 + u64 ret = (u64)rdtsc_ordered(); 148 148 u64 last = gtod->cycle_last; 149 149 150 150 if (likely(ret >= last))
+1 -1
arch/x86/include/asm/kvm_host.h
··· 768 768 spinlock_t pvclock_gtod_sync_lock; 769 769 bool use_master_clock; 770 770 u64 master_kernel_ns; 771 - cycle_t master_cycle_now; 771 + u64 master_cycle_now; 772 772 struct delayed_work kvmclock_update_work; 773 773 struct delayed_work kvmclock_sync_work; 774 774
+3 -4
arch/x86/include/asm/pvclock.h
··· 14 14 #endif 15 15 16 16 /* some helper functions for xen and kvm pv clock sources */ 17 - cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); 17 + u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); 18 18 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src); 19 19 void pvclock_set_flags(u8 flags); 20 20 unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src); ··· 87 87 } 88 88 89 89 static __always_inline 90 - cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, 91 - u64 tsc) 90 + u64 __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u64 tsc) 92 91 { 93 92 u64 delta = tsc - src->tsc_timestamp; 94 - cycle_t offset = pvclock_scale_delta(delta, src->tsc_to_system_mul, 93 + u64 offset = pvclock_scale_delta(delta, src->tsc_to_system_mul, 95 94 src->tsc_shift); 96 95 return src->system_time + offset; 97 96 }
+1 -1
arch/x86/include/asm/tsc.h
··· 29 29 return rdtsc(); 30 30 } 31 31 32 - extern struct system_counterval_t convert_art_to_tsc(cycle_t art); 32 + extern struct system_counterval_t convert_art_to_tsc(u64 art); 33 33 34 34 extern void tsc_init(void); 35 35 extern void mark_tsc_unstable(char *reason);
+2 -2
arch/x86/include/asm/vgtod.h
··· 17 17 unsigned seq; 18 18 19 19 int vclock_mode; 20 - cycle_t cycle_last; 21 - cycle_t mask; 20 + u64 cycle_last; 21 + u64 mask; 22 22 u32 mult; 23 23 u32 shift; 24 24
+2 -2
arch/x86/kernel/apb_timer.c
··· 247 247 static int apbt_clocksource_register(void) 248 248 { 249 249 u64 start, now; 250 - cycle_t t1; 250 + u64 t1; 251 251 252 252 /* Start the counter, use timer 2 as source, timer 0/1 for event */ 253 253 dw_apb_clocksource_start(clocksource_apbt); ··· 355 355 { 356 356 int i, scale; 357 357 u64 old, new; 358 - cycle_t t1, t2; 358 + u64 t1, t2; 359 359 unsigned long khz = 0; 360 360 u32 loop, shift; 361 361
+2 -2
arch/x86/kernel/cpu/mshyperv.c
··· 133 133 return 0; 134 134 } 135 135 136 - static cycle_t read_hv_clock(struct clocksource *arg) 136 + static u64 read_hv_clock(struct clocksource *arg) 137 137 { 138 - cycle_t current_tick; 138 + u64 current_tick; 139 139 /* 140 140 * Read the partition counter to get the current tick count. This count 141 141 * is set to 0 when the partition is created and is incremented in
+7 -7
arch/x86/kernel/hpet.c
··· 791 791 { .lock = __ARCH_SPIN_LOCK_UNLOCKED, }, 792 792 }; 793 793 794 - static cycle_t read_hpet(struct clocksource *cs) 794 + static u64 read_hpet(struct clocksource *cs) 795 795 { 796 796 unsigned long flags; 797 797 union hpet_lock old, new; ··· 802 802 * Read HPET directly if in NMI. 803 803 */ 804 804 if (in_nmi()) 805 - return (cycle_t)hpet_readl(HPET_COUNTER); 805 + return (u64)hpet_readl(HPET_COUNTER); 806 806 807 807 /* 808 808 * Read the current state of the lock and HPET value atomically. ··· 821 821 WRITE_ONCE(hpet.value, new.value); 822 822 arch_spin_unlock(&hpet.lock); 823 823 local_irq_restore(flags); 824 - return (cycle_t)new.value; 824 + return (u64)new.value; 825 825 } 826 826 local_irq_restore(flags); 827 827 ··· 843 843 new.lockval = READ_ONCE(hpet.lockval); 844 844 } while ((new.value == old.value) && arch_spin_is_locked(&new.lock)); 845 845 846 - return (cycle_t)new.value; 846 + return (u64)new.value; 847 847 } 848 848 #else 849 849 /* 850 850 * For UP or 32-bit. 851 851 */ 852 - static cycle_t read_hpet(struct clocksource *cs) 852 + static u64 read_hpet(struct clocksource *cs) 853 853 { 854 - return (cycle_t)hpet_readl(HPET_COUNTER); 854 + return (u64)hpet_readl(HPET_COUNTER); 855 855 } 856 856 #endif 857 857 ··· 867 867 static int hpet_clocksource_register(void) 868 868 { 869 869 u64 start, now; 870 - cycle_t t1; 870 + u64 t1; 871 871 872 872 /* Start the counter */ 873 873 hpet_restart_counter();
+5 -5
arch/x86/kernel/kvmclock.c
··· 32 32 static int kvmclock __ro_after_init = 1; 33 33 static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME; 34 34 static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK; 35 - static cycle_t kvm_sched_clock_offset; 35 + static u64 kvm_sched_clock_offset; 36 36 37 37 static int parse_no_kvmclock(char *arg) 38 38 { ··· 79 79 return -1; 80 80 } 81 81 82 - static cycle_t kvm_clock_read(void) 82 + static u64 kvm_clock_read(void) 83 83 { 84 84 struct pvclock_vcpu_time_info *src; 85 - cycle_t ret; 85 + u64 ret; 86 86 int cpu; 87 87 88 88 preempt_disable_notrace(); ··· 93 93 return ret; 94 94 } 95 95 96 - static cycle_t kvm_clock_get_cycles(struct clocksource *cs) 96 + static u64 kvm_clock_get_cycles(struct clocksource *cs) 97 97 { 98 98 return kvm_clock_read(); 99 99 } 100 100 101 - static cycle_t kvm_sched_clock_read(void) 101 + static u64 kvm_sched_clock_read(void) 102 102 { 103 103 return kvm_clock_read() - kvm_sched_clock_offset; 104 104 }
+2 -2
arch/x86/kernel/pvclock.c
··· 71 71 return flags & valid_flags; 72 72 } 73 73 74 - cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) 74 + u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) 75 75 { 76 76 unsigned version; 77 - cycle_t ret; 77 + u64 ret; 78 78 u64 last; 79 79 u8 flags; 80 80
+3 -3
arch/x86/kernel/tsc.c
··· 1101 1101 * checking the result of read_tsc() - cycle_last for being negative. 1102 1102 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit. 1103 1103 */ 1104 - static cycle_t read_tsc(struct clocksource *cs) 1104 + static u64 read_tsc(struct clocksource *cs) 1105 1105 { 1106 - return (cycle_t)rdtsc_ordered(); 1106 + return (u64)rdtsc_ordered(); 1107 1107 } 1108 1108 1109 1109 /* ··· 1192 1192 /* 1193 1193 * Convert ART to TSC given numerator/denominator found in detect_art() 1194 1194 */ 1195 - struct system_counterval_t convert_art_to_tsc(cycle_t art) 1195 + struct system_counterval_t convert_art_to_tsc(u64 art) 1196 1196 { 1197 1197 u64 tmp, res, rem; 1198 1198
+2 -2
arch/x86/kvm/lapic.c
··· 1106 1106 now = ktime_get(); 1107 1107 remaining = ktime_sub(apic->lapic_timer.target_expiration, now); 1108 1108 if (ktime_to_ns(remaining) < 0) 1109 - remaining = ktime_set(0, 0); 1109 + remaining = 0; 1110 1110 1111 1111 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period); 1112 1112 tmcct = div64_u64(ns, ··· 2057 2057 apic->lapic_timer.tscdeadline = 0; 2058 2058 if (apic_lvtt_oneshot(apic)) { 2059 2059 apic->lapic_timer.tscdeadline = 0; 2060 - apic->lapic_timer.target_expiration = ktime_set(0, 0); 2060 + apic->lapic_timer.target_expiration = 0; 2061 2061 } 2062 2062 atomic_set(&apic->lapic_timer.pending, 0); 2063 2063 }
+7 -7
arch/x86/kvm/x86.c
··· 1131 1131 1132 1132 struct { /* extract of a clocksource struct */ 1133 1133 int vclock_mode; 1134 - cycle_t cycle_last; 1135 - cycle_t mask; 1134 + u64 cycle_last; 1135 + u64 mask; 1136 1136 u32 mult; 1137 1137 u32 shift; 1138 1138 } clock; ··· 1572 1572 1573 1573 #ifdef CONFIG_X86_64 1574 1574 1575 - static cycle_t read_tsc(void) 1575 + static u64 read_tsc(void) 1576 1576 { 1577 - cycle_t ret = (cycle_t)rdtsc_ordered(); 1577 + u64 ret = (u64)rdtsc_ordered(); 1578 1578 u64 last = pvclock_gtod_data.clock.cycle_last; 1579 1579 1580 1580 if (likely(ret >= last)) ··· 1592 1592 return last; 1593 1593 } 1594 1594 1595 - static inline u64 vgettsc(cycle_t *cycle_now) 1595 + static inline u64 vgettsc(u64 *cycle_now) 1596 1596 { 1597 1597 long v; 1598 1598 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; ··· 1603 1603 return v * gtod->clock.mult; 1604 1604 } 1605 1605 1606 - static int do_monotonic_boot(s64 *t, cycle_t *cycle_now) 1606 + static int do_monotonic_boot(s64 *t, u64 *cycle_now) 1607 1607 { 1608 1608 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 1609 1609 unsigned long seq; ··· 1624 1624 } 1625 1625 1626 1626 /* returns true if host is using tsc clocksource */ 1627 - static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now) 1627 + static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now) 1628 1628 { 1629 1629 /* checked again under seqlock below */ 1630 1630 if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
+1 -1
arch/x86/lguest/boot.c
··· 916 916 * If we can't use the TSC, the kernel falls back to our lower-priority 917 917 * "lguest_clock", where we read the time value given to us by the Host. 918 918 */ 919 - static cycle_t lguest_clock_read(struct clocksource *cs) 919 + static u64 lguest_clock_read(struct clocksource *cs) 920 920 { 921 921 unsigned long sec, nsec; 922 922
+4 -4
arch/x86/platform/uv/uv_time.c
··· 30 30 31 31 #define RTC_NAME "sgi_rtc" 32 32 33 - static cycle_t uv_read_rtc(struct clocksource *cs); 33 + static u64 uv_read_rtc(struct clocksource *cs); 34 34 static int uv_rtc_next_event(unsigned long, struct clock_event_device *); 35 35 static int uv_rtc_shutdown(struct clock_event_device *evt); 36 36 ··· 38 38 .name = RTC_NAME, 39 39 .rating = 299, 40 40 .read = uv_read_rtc, 41 - .mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK, 41 + .mask = (u64)UVH_RTC_REAL_TIME_CLOCK_MASK, 42 42 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 43 43 }; 44 44 ··· 296 296 * cachelines of it's own page. This allows faster simultaneous reads 297 297 * from a given socket. 298 298 */ 299 - static cycle_t uv_read_rtc(struct clocksource *cs) 299 + static u64 uv_read_rtc(struct clocksource *cs) 300 300 { 301 301 unsigned long offset; 302 302 ··· 305 305 else 306 306 offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; 307 307 308 - return (cycle_t)uv_read_local_mmr(UVH_RTC | offset); 308 + return (u64)uv_read_local_mmr(UVH_RTC | offset); 309 309 } 310 310 311 311 /*
+3 -3
arch/x86/xen/time.c
··· 39 39 return pvclock_tsc_khz(info); 40 40 } 41 41 42 - cycle_t xen_clocksource_read(void) 42 + u64 xen_clocksource_read(void) 43 43 { 44 44 struct pvclock_vcpu_time_info *src; 45 - cycle_t ret; 45 + u64 ret; 46 46 47 47 preempt_disable_notrace(); 48 48 src = &__this_cpu_read(xen_vcpu)->time; ··· 51 51 return ret; 52 52 } 53 53 54 - static cycle_t xen_clocksource_get_cycles(struct clocksource *cs) 54 + static u64 xen_clocksource_get_cycles(struct clocksource *cs) 55 55 { 56 56 return xen_clocksource_read(); 57 57 }
+1 -1
arch/x86/xen/xen-ops.h
··· 67 67 void xen_setup_timer(int cpu); 68 68 void xen_setup_runstate_info(int cpu); 69 69 void xen_teardown_timer(int cpu); 70 - cycle_t xen_clocksource_read(void); 70 + u64 xen_clocksource_read(void); 71 71 void xen_setup_cpu_clockevents(void); 72 72 void __init xen_init_time_ops(void); 73 73 void __init xen_hvm_init_time_ops(void);
+2 -2
arch/xtensa/kernel/time.c
··· 34 34 unsigned long ccount_freq; /* ccount Hz */ 35 35 EXPORT_SYMBOL(ccount_freq); 36 36 37 - static cycle_t ccount_read(struct clocksource *cs) 37 + static u64 ccount_read(struct clocksource *cs) 38 38 { 39 - return (cycle_t)get_ccount(); 39 + return (u64)get_ccount(); 40 40 } 41 41 42 42 static u64 notrace ccount_sched_clock_read(void)
+1 -1
block/blk-mq.c
··· 2569 2569 * This will be replaced with the stats tracking code, using 2570 2570 * 'avg_completion_time / 2' as the pre-sleep target. 2571 2571 */ 2572 - kt = ktime_set(0, nsecs); 2572 + kt = nsecs; 2573 2573 2574 2574 mode = HRTIMER_MODE_REL; 2575 2575 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
+1 -1
drivers/base/power/main.c
··· 194 194 195 195 static ktime_t initcall_debug_start(struct device *dev) 196 196 { 197 - ktime_t calltime = ktime_set(0, 0); 197 + ktime_t calltime = 0; 198 198 199 199 if (pm_print_times_enabled) { 200 200 pr_info("calling %s+ @ %i, parent: %s\n",
+2 -2
drivers/base/power/wakeup.c
··· 998 998 999 999 active_time = ktime_sub(now, ws->last_time); 1000 1000 total_time = ktime_add(total_time, active_time); 1001 - if (active_time.tv64 > max_time.tv64) 1001 + if (active_time > max_time) 1002 1002 max_time = active_time; 1003 1003 1004 1004 if (ws->autosleep_enabled) 1005 1005 prevent_sleep_time = ktime_add(prevent_sleep_time, 1006 1006 ktime_sub(now, ws->start_prevent_time)); 1007 1007 } else { 1008 - active_time = ktime_set(0, 0); 1008 + active_time = 0; 1009 1009 } 1010 1010 1011 1011 seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+1 -1
drivers/block/null_blk.c
··· 257 257 258 258 static void null_cmd_end_timer(struct nullb_cmd *cmd) 259 259 { 260 - ktime_t kt = ktime_set(0, completion_nsec); 260 + ktime_t kt = completion_nsec; 261 261 262 262 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); 263 263 }
+2 -2
drivers/char/hpet.c
··· 69 69 #ifdef CONFIG_IA64 70 70 static void __iomem *hpet_mctr; 71 71 72 - static cycle_t read_hpet(struct clocksource *cs) 72 + static u64 read_hpet(struct clocksource *cs) 73 73 { 74 - return (cycle_t)read_counter((void __iomem *)hpet_mctr); 74 + return (u64)read_counter((void __iomem *)hpet_mctr); 75 75 } 76 76 77 77 static struct clocksource clocksource_hpet = {
+7 -7
drivers/clocksource/acpi_pm.c
··· 58 58 return v2; 59 59 } 60 60 61 - static cycle_t acpi_pm_read(struct clocksource *cs) 61 + static u64 acpi_pm_read(struct clocksource *cs) 62 62 { 63 - return (cycle_t)read_pmtmr(); 63 + return (u64)read_pmtmr(); 64 64 } 65 65 66 66 static struct clocksource clocksource_acpi_pm = { 67 67 .name = "acpi_pm", 68 68 .rating = 200, 69 69 .read = acpi_pm_read, 70 - .mask = (cycle_t)ACPI_PM_MASK, 70 + .mask = (u64)ACPI_PM_MASK, 71 71 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 72 72 }; 73 73 ··· 81 81 } 82 82 __setup("acpi_pm_good", acpi_pm_good_setup); 83 83 84 - static cycle_t acpi_pm_read_slow(struct clocksource *cs) 84 + static u64 acpi_pm_read_slow(struct clocksource *cs) 85 85 { 86 - return (cycle_t)acpi_pm_read_verified(); 86 + return (u64)acpi_pm_read_verified(); 87 87 } 88 88 89 89 static inline void acpi_pm_need_workaround(void) ··· 145 145 */ 146 146 static int verify_pmtmr_rate(void) 147 147 { 148 - cycle_t value1, value2; 148 + u64 value1, value2; 149 149 unsigned long count, delta; 150 150 151 151 mach_prepare_counter(); ··· 175 175 176 176 static int __init init_acpi_pm_clocksource(void) 177 177 { 178 - cycle_t value1, value2; 178 + u64 value1, value2; 179 179 unsigned int i, j = 0; 180 180 181 181 if (!pmtmr_ioport)
+6 -6
drivers/clocksource/arc_timer.c
··· 56 56 57 57 #ifdef CONFIG_ARC_TIMERS_64BIT 58 58 59 - static cycle_t arc_read_gfrc(struct clocksource *cs) 59 + static u64 arc_read_gfrc(struct clocksource *cs) 60 60 { 61 61 unsigned long flags; 62 62 u32 l, h; ··· 71 71 72 72 local_irq_restore(flags); 73 73 74 - return (((cycle_t)h) << 32) | l; 74 + return (((u64)h) << 32) | l; 75 75 } 76 76 77 77 static struct clocksource arc_counter_gfrc = { ··· 105 105 #define AUX_RTC_LOW 0x104 106 106 #define AUX_RTC_HIGH 0x105 107 107 108 - static cycle_t arc_read_rtc(struct clocksource *cs) 108 + static u64 arc_read_rtc(struct clocksource *cs) 109 109 { 110 110 unsigned long status; 111 111 u32 l, h; ··· 122 122 status = read_aux_reg(AUX_RTC_CTRL); 123 123 } while (!(status & _BITUL(31))); 124 124 125 - return (((cycle_t)h) << 32) | l; 125 + return (((u64)h) << 32) | l; 126 126 } 127 127 128 128 static struct clocksource arc_counter_rtc = { ··· 166 166 * 32bit TIMER1 to keep counting monotonically and wraparound 167 167 */ 168 168 169 - static cycle_t arc_read_timer1(struct clocksource *cs) 169 + static u64 arc_read_timer1(struct clocksource *cs) 170 170 { 171 - return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT); 171 + return (u64) read_aux_reg(ARC_REG_TIMER1_CNT); 172 172 } 173 173 174 174 static struct clocksource arc_counter_timer1 = {
+2 -2
drivers/clocksource/arm_arch_timer.c
··· 562 562 */ 563 563 u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; 564 564 565 - static cycle_t arch_counter_read(struct clocksource *cs) 565 + static u64 arch_counter_read(struct clocksource *cs) 566 566 { 567 567 return arch_timer_read_counter(); 568 568 } 569 569 570 - static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) 570 + static u64 arch_counter_read_cc(const struct cyclecounter *cc) 571 571 { 572 572 return arch_timer_read_counter(); 573 573 }
+1 -1
drivers/clocksource/arm_global_timer.c
··· 195 195 return 0; 196 196 } 197 197 198 - static cycle_t gt_clocksource_read(struct clocksource *cs) 198 + static u64 gt_clocksource_read(struct clocksource *cs) 199 199 { 200 200 return gt_counter_read(); 201 201 }
+2 -2
drivers/clocksource/cadence_ttc_timer.c
··· 158 158 * 159 159 * returns: Current timer counter register value 160 160 **/ 161 - static cycle_t __ttc_clocksource_read(struct clocksource *cs) 161 + static u64 __ttc_clocksource_read(struct clocksource *cs) 162 162 { 163 163 struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc; 164 164 165 - return (cycle_t)readl_relaxed(timer->base_addr + 165 + return (u64)readl_relaxed(timer->base_addr + 166 166 TTC_COUNT_VAL_OFFSET); 167 167 } 168 168
+1 -1
drivers/clocksource/clksrc-dbx500-prcmu.c
··· 30 30 31 31 static void __iomem *clksrc_dbx500_timer_base; 32 32 33 - static cycle_t notrace clksrc_dbx500_prcmu_read(struct clocksource *cs) 33 + static u64 notrace clksrc_dbx500_prcmu_read(struct clocksource *cs) 34 34 { 35 35 void __iomem *base = clksrc_dbx500_timer_base; 36 36 u32 count, count2;
+4 -4
drivers/clocksource/dw_apb_timer.c
··· 348 348 dw_apb_clocksource_read(dw_cs); 349 349 } 350 350 351 - static cycle_t __apbt_read_clocksource(struct clocksource *cs) 351 + static u64 __apbt_read_clocksource(struct clocksource *cs) 352 352 { 353 353 u32 current_count; 354 354 struct dw_apb_clocksource *dw_cs = ··· 357 357 current_count = apbt_readl_relaxed(&dw_cs->timer, 358 358 APBTMR_N_CURRENT_VALUE); 359 359 360 - return (cycle_t)~current_count; 360 + return (u64)~current_count; 361 361 } 362 362 363 363 static void apbt_restart_clocksource(struct clocksource *cs) ··· 416 416 * 417 417 * @dw_cs: The clocksource to read. 418 418 */ 419 - cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs) 419 + u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs) 420 420 { 421 - return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); 421 + return (u64)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); 422 422 }
+6 -6
drivers/clocksource/em_sti.c
··· 110 110 clk_disable_unprepare(p->clk); 111 111 } 112 112 113 - static cycle_t em_sti_count(struct em_sti_priv *p) 113 + static u64 em_sti_count(struct em_sti_priv *p) 114 114 { 115 - cycle_t ticks; 115 + u64 ticks; 116 116 unsigned long flags; 117 117 118 118 /* the STI hardware buffers the 48-bit count, but to ··· 121 121 * Always read STI_COUNT_H before STI_COUNT_L. 122 122 */ 123 123 raw_spin_lock_irqsave(&p->lock, flags); 124 - ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; 124 + ticks = (u64)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; 125 125 ticks |= em_sti_read(p, STI_COUNT_L); 126 126 raw_spin_unlock_irqrestore(&p->lock, flags); 127 127 128 128 return ticks; 129 129 } 130 130 131 - static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next) 131 + static u64 em_sti_set_next(struct em_sti_priv *p, u64 next) 132 132 { 133 133 unsigned long flags; 134 134 ··· 198 198 return container_of(cs, struct em_sti_priv, cs); 199 199 } 200 200 201 - static cycle_t em_sti_clocksource_read(struct clocksource *cs) 201 + static u64 em_sti_clocksource_read(struct clocksource *cs) 202 202 { 203 203 return em_sti_count(cs_to_em_sti(cs)); 204 204 } ··· 271 271 struct clock_event_device *ced) 272 272 { 273 273 struct em_sti_priv *p = ced_to_em_sti(ced); 274 - cycle_t next; 274 + u64 next; 275 275 int safe; 276 276 277 277 next = em_sti_set_next(p, em_sti_count(p) + delta);
+3 -3
drivers/clocksource/exynos_mct.c
··· 183 183 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U); 184 184 } while (hi != hi2); 185 185 186 - return ((cycle_t)hi << 32) | lo; 186 + return ((u64)hi << 32) | lo; 187 187 } 188 188 189 189 /** ··· 199 199 return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L); 200 200 } 201 201 202 - static cycle_t exynos4_frc_read(struct clocksource *cs) 202 + static u64 exynos4_frc_read(struct clocksource *cs) 203 203 { 204 204 return exynos4_read_count_32(); 205 205 } ··· 266 266 static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles) 267 267 { 268 268 unsigned int tcon; 269 - cycle_t comp_cycle; 269 + u64 comp_cycle; 270 270 271 271 tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); 272 272
+1 -1
drivers/clocksource/h8300_timer16.c
··· 72 72 return container_of(cs, struct timer16_priv, cs); 73 73 } 74 74 75 - static cycle_t timer16_clocksource_read(struct clocksource *cs) 75 + static u64 timer16_clocksource_read(struct clocksource *cs) 76 76 { 77 77 struct timer16_priv *p = cs_to_priv(cs); 78 78 unsigned long raw, value;
+1 -1
drivers/clocksource/h8300_tpu.c
··· 64 64 return container_of(cs, struct tpu_priv, cs); 65 65 } 66 66 67 - static cycle_t tpu_clocksource_read(struct clocksource *cs) 67 + static u64 tpu_clocksource_read(struct clocksource *cs) 68 68 { 69 69 struct tpu_priv *p = cs_to_priv(cs); 70 70 unsigned long flags;
+2 -2
drivers/clocksource/i8253.c
··· 25 25 * to just read by itself. So use jiffies to emulate a free 26 26 * running counter: 27 27 */ 28 - static cycle_t i8253_read(struct clocksource *cs) 28 + static u64 i8253_read(struct clocksource *cs) 29 29 { 30 30 static int old_count; 31 31 static u32 old_jifs; ··· 83 83 84 84 count = (PIT_LATCH - 1) - count; 85 85 86 - return (cycle_t)(jifs * PIT_LATCH) + count; 86 + return (u64)(jifs * PIT_LATCH) + count; 87 87 } 88 88 89 89 static struct clocksource i8253_cs = {
+1 -1
drivers/clocksource/jcore-pit.c
··· 57 57 return seclo * NSEC_PER_SEC + nsec; 58 58 } 59 59 60 - static cycle_t jcore_clocksource_read(struct clocksource *cs) 60 + static u64 jcore_clocksource_read(struct clocksource *cs) 61 61 { 62 62 return jcore_sched_clock_read(); 63 63 }
+1 -1
drivers/clocksource/metag_generic.c
··· 56 56 return 0; 57 57 } 58 58 59 - static cycle_t metag_clocksource_read(struct clocksource *cs) 59 + static u64 metag_clocksource_read(struct clocksource *cs) 60 60 { 61 61 return __core_reg_get(TXTIMER); 62 62 }
+1 -1
drivers/clocksource/mips-gic-timer.c
··· 125 125 return 0; 126 126 } 127 127 128 - static cycle_t gic_hpt_read(struct clocksource *cs) 128 + static u64 gic_hpt_read(struct clocksource *cs) 129 129 { 130 130 return gic_read_count(); 131 131 }
+9 -9
drivers/clocksource/mmio.c
··· 20 20 return container_of(c, struct clocksource_mmio, clksrc); 21 21 } 22 22 23 - cycle_t clocksource_mmio_readl_up(struct clocksource *c) 23 + u64 clocksource_mmio_readl_up(struct clocksource *c) 24 24 { 25 - return (cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg); 25 + return (u64)readl_relaxed(to_mmio_clksrc(c)->reg); 26 26 } 27 27 28 - cycle_t clocksource_mmio_readl_down(struct clocksource *c) 28 + u64 clocksource_mmio_readl_down(struct clocksource *c) 29 29 { 30 - return ~(cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask; 30 + return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask; 31 31 } 32 32 33 - cycle_t clocksource_mmio_readw_up(struct clocksource *c) 33 + u64 clocksource_mmio_readw_up(struct clocksource *c) 34 34 { 35 - return (cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg); 35 + return (u64)readw_relaxed(to_mmio_clksrc(c)->reg); 36 36 } 37 37 38 - cycle_t clocksource_mmio_readw_down(struct clocksource *c) 38 + u64 clocksource_mmio_readw_down(struct clocksource *c) 39 39 { 40 - return ~(cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask; 40 + return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask; 41 41 } 42 42 43 43 /** ··· 51 51 */ 52 52 int __init clocksource_mmio_init(void __iomem *base, const char *name, 53 53 unsigned long hz, int rating, unsigned bits, 54 - cycle_t (*read)(struct clocksource *)) 54 + u64 (*read)(struct clocksource *)) 55 55 { 56 56 struct clocksource_mmio *cs; 57 57
+1 -1
drivers/clocksource/mxs_timer.c
··· 97 97 HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR); 98 98 } 99 99 100 - static cycle_t timrotv1_get_cycles(struct clocksource *cs) 100 + static u64 timrotv1_get_cycles(struct clocksource *cs) 101 101 { 102 102 return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1)) 103 103 & 0xffff0000) >> 16);
+1 -1
drivers/clocksource/qcom-timer.c
··· 89 89 90 90 static void __iomem *source_base; 91 91 92 - static notrace cycle_t msm_read_timer_count(struct clocksource *cs) 92 + static notrace u64 msm_read_timer_count(struct clocksource *cs) 93 93 { 94 94 return readl_relaxed(source_base + TIMER_COUNT_VAL); 95 95 }
+1 -1
drivers/clocksource/samsung_pwm_timer.c
··· 307 307 samsung_time_start(pwm.source_id, true); 308 308 } 309 309 310 - static cycle_t notrace samsung_clocksource_read(struct clocksource *c) 310 + static u64 notrace samsung_clocksource_read(struct clocksource *c) 311 311 { 312 312 return ~readl_relaxed(pwm.source_reg); 313 313 }
+2 -2
drivers/clocksource/scx200_hrt.c
··· 43 43 /* The base timer frequency, * 27 if selected */ 44 44 #define HRT_FREQ 1000000 45 45 46 - static cycle_t read_hrt(struct clocksource *cs) 46 + static u64 read_hrt(struct clocksource *cs) 47 47 { 48 48 /* Read the timer value */ 49 - return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET); 49 + return (u64) inl(scx200_cb_base + SCx200_TIMER_OFFSET); 50 50 } 51 51 52 52 static struct clocksource cs_hrt = {
+1 -1
drivers/clocksource/sh_cmt.c
··· 612 612 return container_of(cs, struct sh_cmt_channel, cs); 613 613 } 614 614 615 - static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) 615 + static u64 sh_cmt_clocksource_read(struct clocksource *cs) 616 616 { 617 617 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); 618 618 unsigned long flags, raw;
+1 -1
drivers/clocksource/sh_tmu.c
··· 255 255 return container_of(cs, struct sh_tmu_channel, cs); 256 256 } 257 257 258 - static cycle_t sh_tmu_clocksource_read(struct clocksource *cs) 258 + static u64 sh_tmu_clocksource_read(struct clocksource *cs) 259 259 { 260 260 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); 261 261
+2 -2
drivers/clocksource/tcb_clksrc.c
··· 41 41 42 42 static void __iomem *tcaddr; 43 43 44 - static cycle_t tc_get_cycles(struct clocksource *cs) 44 + static u64 tc_get_cycles(struct clocksource *cs) 45 45 { 46 46 unsigned long flags; 47 47 u32 lower, upper; ··· 56 56 return (upper << 16) | lower; 57 57 } 58 58 59 - static cycle_t tc_get_cycles32(struct clocksource *cs) 59 + static u64 tc_get_cycles32(struct clocksource *cs) 60 60 { 61 61 return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); 62 62 }
+2 -2
drivers/clocksource/time-pistachio.c
··· 67 67 writel(value, base + 0x20 * gpt_id + offset); 68 68 } 69 69 70 - static cycle_t notrace 70 + static u64 notrace 71 71 pistachio_clocksource_read_cycles(struct clocksource *cs) 72 72 { 73 73 struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); ··· 84 84 counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0); 85 85 raw_spin_unlock_irqrestore(&pcs->lock, flags); 86 86 87 - return (cycle_t)~counter; 87 + return (u64)~counter; 88 88 } 89 89 90 90 static u64 notrace pistachio_read_sched_clock(void)
+1 -1
drivers/clocksource/timer-atlas7.c
··· 85 85 } 86 86 87 87 /* read 64-bit timer counter */ 88 - static cycle_t sirfsoc_timer_read(struct clocksource *cs) 88 + static u64 sirfsoc_timer_read(struct clocksource *cs) 89 89 { 90 90 u64 cycles; 91 91
+1 -1
drivers/clocksource/timer-atmel-pit.c
··· 73 73 * Clocksource: just a monotonic counter of MCK/16 cycles. 74 74 * We don't care whether or not PIT irqs are enabled. 75 75 */ 76 - static cycle_t read_pit_clk(struct clocksource *cs) 76 + static u64 read_pit_clk(struct clocksource *cs) 77 77 { 78 78 struct pit_data *data = clksrc_to_pit_data(cs); 79 79 unsigned long flags;
+1 -1
drivers/clocksource/timer-atmel-st.c
··· 92 92 return IRQ_NONE; 93 93 } 94 94 95 - static cycle_t read_clk32k(struct clocksource *cs) 95 + static u64 read_clk32k(struct clocksource *cs) 96 96 { 97 97 return read_CRTR(); 98 98 }
+2 -2
drivers/clocksource/timer-nps.c
··· 77 77 return 0; 78 78 } 79 79 80 - static cycle_t nps_clksrc_read(struct clocksource *clksrc) 80 + static u64 nps_clksrc_read(struct clocksource *clksrc) 81 81 { 82 82 int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET; 83 83 84 - return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]); 84 + return (u64)ioread32be(nps_msu_reg_low_addr[cluster]); 85 85 } 86 86 87 87 static int __init nps_setup_clocksource(struct device_node *node)
+1 -1
drivers/clocksource/timer-prima2.c
··· 72 72 } 73 73 74 74 /* read 64-bit timer counter */ 75 - static cycle_t notrace sirfsoc_timer_read(struct clocksource *cs) 75 + static u64 notrace sirfsoc_timer_read(struct clocksource *cs) 76 76 { 77 77 u64 cycles; 78 78
+1 -1
drivers/clocksource/timer-sun5i.c
··· 152 152 return IRQ_HANDLED; 153 153 } 154 154 155 - static cycle_t sun5i_clksrc_read(struct clocksource *clksrc) 155 + static u64 sun5i_clksrc_read(struct clocksource *clksrc) 156 156 { 157 157 struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc); 158 158
+2 -2
drivers/clocksource/timer-ti-32k.c
··· 65 65 return container_of(cs, struct ti_32k, cs); 66 66 } 67 67 68 - static cycle_t notrace ti_32k_read_cycles(struct clocksource *cs) 68 + static u64 notrace ti_32k_read_cycles(struct clocksource *cs) 69 69 { 70 70 struct ti_32k *ti = to_ti_32k(cs); 71 71 72 - return (cycle_t)readl_relaxed(ti->counter); 72 + return (u64)readl_relaxed(ti->counter); 73 73 } 74 74 75 75 static struct ti_32k ti_32k_timer = {
+2 -2
drivers/clocksource/vt8500_timer.c
··· 53 53 54 54 static void __iomem *regbase; 55 55 56 - static cycle_t vt8500_timer_read(struct clocksource *cs) 56 + static u64 vt8500_timer_read(struct clocksource *cs) 57 57 { 58 58 int loops = msecs_to_loops(10); 59 59 writel(3, regbase + TIMER_CTRL_VAL); ··· 75 75 struct clock_event_device *evt) 76 76 { 77 77 int loops = msecs_to_loops(10); 78 - cycle_t alarm = clocksource.read(&clocksource) + cycles; 78 + u64 alarm = clocksource.read(&clocksource) + cycles; 79 79 while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE) 80 80 && --loops) 81 81 cpu_relax();
+2 -2
drivers/dma/dmatest.c
··· 429 429 int dst_cnt; 430 430 int i; 431 431 ktime_t ktime, start, diff; 432 - ktime_t filltime = ktime_set(0, 0); 433 - ktime_t comparetime = ktime_set(0, 0); 432 + ktime_t filltime = 0; 433 + ktime_t comparetime = 0; 434 434 s64 runtime = 0; 435 435 unsigned long long total_len = 0; 436 436 u8 align = 0;
+3 -3
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
··· 752 752 753 753 drm_handle_vblank(ddev, amdgpu_crtc->crtc_id); 754 754 dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id); 755 - hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), 755 + hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD, 756 756 HRTIMER_MODE_REL); 757 757 758 758 return HRTIMER_NORESTART; ··· 772 772 hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer, 773 773 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 774 774 hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer, 775 - ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD)); 775 + DCE_VIRTUAL_VBLANK_PERIOD); 776 776 adev->mode_info.crtcs[crtc]->vblank_timer.function = 777 777 dce_virtual_vblank_timer_handle; 778 778 hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer, 779 - ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); 779 + DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL); 780 780 } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) { 781 781 DRM_DEBUG("Disable software vsync timer\n"); 782 782 hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
+1 -1
drivers/gpu/drm/i915/intel_uncore.c
··· 62 62 { 63 63 d->wake_count++; 64 64 hrtimer_start_range_ns(&d->timer, 65 - ktime_set(0, NSEC_PER_MSEC), 65 + NSEC_PER_MSEC, 66 66 NSEC_PER_MSEC, 67 67 HRTIMER_MODE_REL); 68 68 }
+1 -1
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 330 330 __set_current_state(intr ? TASK_INTERRUPTIBLE : 331 331 TASK_UNINTERRUPTIBLE); 332 332 333 - kt = ktime_set(0, sleep_time); 333 + kt = sleep_time; 334 334 schedule_hrtimeout(&kt, HRTIMER_MODE_REL); 335 335 sleep_time *= 2; 336 336 if (sleep_time > NSEC_PER_MSEC)
+1 -1
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
··· 539 539 } 540 540 541 541 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); 542 - tilcdc_crtc->last_vblank = ktime_set(0, 0); 542 + tilcdc_crtc->last_vblank = 0; 543 543 544 544 tilcdc_crtc->enabled = false; 545 545 mutex_unlock(&tilcdc_crtc->enable_lock);
+4 -4
drivers/hv/hv.c
··· 135 135 EXPORT_SYMBOL_GPL(hv_do_hypercall); 136 136 137 137 #ifdef CONFIG_X86_64 138 - static cycle_t read_hv_clock_tsc(struct clocksource *arg) 138 + static u64 read_hv_clock_tsc(struct clocksource *arg) 139 139 { 140 - cycle_t current_tick; 140 + u64 current_tick; 141 141 struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page; 142 142 143 143 if (tsc_pg->tsc_sequence != 0) { ··· 146 146 */ 147 147 148 148 while (1) { 149 - cycle_t tmp; 149 + u64 tmp; 150 150 u32 sequence = tsc_pg->tsc_sequence; 151 151 u64 cur_tsc; 152 152 u64 scale = tsc_pg->tsc_scale; ··· 350 350 static int hv_ce_set_next_event(unsigned long delta, 351 351 struct clock_event_device *evt) 352 352 { 353 - cycle_t current_tick; 353 + u64 current_tick; 354 354 355 355 WARN_ON(!clockevent_state_oneshot(evt)); 356 356
+2 -3
drivers/iio/trigger/iio-trig-hrtimer.c
··· 63 63 return -EINVAL; 64 64 65 65 info->sampling_frequency = val; 66 - info->period = ktime_set(0, NSEC_PER_SEC / val); 66 + info->period = NSEC_PER_SEC / val; 67 67 68 68 return len; 69 69 } ··· 141 141 trig_info->timer.function = iio_hrtimer_trig_handler; 142 142 143 143 trig_info->sampling_frequency = HRTIMER_DEFAULT_SAMPLING_FREQUENCY; 144 - trig_info->period = ktime_set(0, NSEC_PER_SEC / 145 - trig_info->sampling_frequency); 144 + trig_info->period = NSEC_PER_SEC / trig_info->sampling_frequency; 146 145 147 146 ret = iio_trigger_register(trig_info->swt.trigger); 148 147 if (ret)
+1 -1
drivers/input/joystick/walkera0701.c
··· 165 165 RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */ 166 166 w->counter = 0; 167 167 168 - hrtimer_start(&w->timer, ktime_set(0, BIN_SAMPLE), HRTIMER_MODE_REL); 168 + hrtimer_start(&w->timer, BIN_SAMPLE, HRTIMER_MODE_REL); 169 169 } 170 170 171 171 static enum hrtimer_restart timer_handler(struct hrtimer
+8 -8
drivers/irqchip/irq-mips-gic.c
··· 152 152 } 153 153 154 154 #ifdef CONFIG_CLKSRC_MIPS_GIC 155 - cycle_t gic_read_count(void) 155 + u64 gic_read_count(void) 156 156 { 157 157 unsigned int hi, hi2, lo; 158 158 159 159 if (mips_cm_is64) 160 - return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER)); 160 + return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER)); 161 161 162 162 do { 163 163 hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); ··· 165 165 hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); 166 166 } while (hi2 != hi); 167 167 168 - return (((cycle_t) hi) << 32) + lo; 168 + return (((u64) hi) << 32) + lo; 169 169 } 170 170 171 171 unsigned int gic_get_count_width(void) ··· 179 179 return bits; 180 180 } 181 181 182 - void gic_write_compare(cycle_t cnt) 182 + void gic_write_compare(u64 cnt) 183 183 { 184 184 if (mips_cm_is64) { 185 185 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); ··· 191 191 } 192 192 } 193 193 194 - void gic_write_cpu_compare(cycle_t cnt, int cpu) 194 + void gic_write_cpu_compare(u64 cnt, int cpu) 195 195 { 196 196 unsigned long flags; 197 197 ··· 211 211 local_irq_restore(flags); 212 212 } 213 213 214 - cycle_t gic_read_compare(void) 214 + u64 gic_read_compare(void) 215 215 { 216 216 unsigned int hi, lo; 217 217 218 218 if (mips_cm_is64) 219 - return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE)); 219 + return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE)); 220 220 221 221 hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI)); 222 222 lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO)); 223 223 224 - return (((cycle_t) hi) << 32) + lo; 224 + return (((u64) hi) << 32) + lo; 225 225 } 226 226 227 227 void gic_start_count(void)
+1 -2
drivers/mailbox/mailbox.c
··· 87 87 88 88 if (!err && (chan->txdone_method & TXDONE_BY_POLL)) 89 89 /* kick start the timer immediately to avoid delays */ 90 - hrtimer_start(&chan->mbox->poll_hrt, ktime_set(0, 0), 91 - HRTIMER_MODE_REL); 90 + hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); 92 91 } 93 92 94 93 static void tx_tick(struct mbox_chan *chan, int r)
+1 -1
drivers/media/dvb-core/dmxdev.c
··· 562 562 struct dmxdev_filter *filter, 563 563 struct dmxdev_feed *feed) 564 564 { 565 - ktime_t timeout = ktime_set(0, 0); 565 + ktime_t timeout = 0; 566 566 struct dmx_pes_filter_params *para = &filter->params.pes; 567 567 dmx_output_t otype; 568 568 int ret;
+2 -4
drivers/media/pci/cx88/cx88-input.c
··· 178 178 struct cx88_IR *ir = container_of(timer, struct cx88_IR, timer); 179 179 180 180 cx88_ir_handle_key(ir); 181 - missed = hrtimer_forward_now(&ir->timer, 182 - ktime_set(0, ir->polling * 1000000)); 181 + missed = hrtimer_forward_now(&ir->timer, ir->polling * 1000000); 183 182 if (missed > 1) 184 183 ir_dprintk("Missed ticks %ld\n", missed - 1); 185 184 ··· 198 199 if (ir->polling) { 199 200 hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 200 201 ir->timer.function = cx88_ir_work; 201 - hrtimer_start(&ir->timer, 202 - ktime_set(0, ir->polling * 1000000), 202 + hrtimer_start(&ir->timer, ir->polling * 1000000, 203 203 HRTIMER_MODE_REL); 204 204 } 205 205 if (ir->sampling) {
+1 -1
drivers/media/pci/pt3/pt3.c
··· 463 463 464 464 pt3_proc_dma(adap); 465 465 466 - delay = ktime_set(0, PT3_FETCH_DELAY * NSEC_PER_MSEC); 466 + delay = PT3_FETCH_DELAY * NSEC_PER_MSEC; 467 467 set_current_state(TASK_UNINTERRUPTIBLE); 468 468 freezable_schedule_hrtimeout_range(&delay, 469 469 PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
+1 -1
drivers/media/rc/ir-rx51.c
··· 109 109 110 110 now = timer->base->get_time(); 111 111 112 - } while (hrtimer_get_expires_tv64(timer) < now.tv64); 112 + } while (hrtimer_get_expires_tv64(timer) < now); 113 113 114 114 return HRTIMER_RESTART; 115 115 end:
+2 -2
drivers/net/can/softing/softing_fw.c
··· 390 390 ovf = 0x100000000ULL * 16; 391 391 do_div(ovf, card->pdat->freq ?: 16); 392 392 393 - card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf); 393 + card->ts_overflow = ktime_add_us(0, ovf); 394 394 } 395 395 396 396 ktime_t softing_raw2ktime(struct softing *card, u32 raw) ··· 647 647 open_candev(netdev); 648 648 if (dev != netdev) { 649 649 /* notify other busses on the restart */ 650 - softing_netdev_rx(netdev, &msg, ktime_set(0, 0)); 650 + softing_netdev_rx(netdev, &msg, 0); 651 651 ++priv->can.can_stats.restarts; 652 652 } 653 653 netif_wake_queue(netdev);
+1 -1
drivers/net/can/softing/softing_main.c
··· 192 192 /* a dead bus has no overflows */ 193 193 continue; 194 194 ++netdev->stats.rx_over_errors; 195 - softing_netdev_rx(netdev, &msg, ktime_set(0, 0)); 195 + softing_netdev_rx(netdev, &msg, 0); 196 196 } 197 197 /* prepare for other use */ 198 198 memset(&msg, 0, sizeof(msg));
+1 -1
drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
··· 122 122 #include "xgbe.h" 123 123 #include "xgbe-common.h" 124 124 125 - static cycle_t xgbe_cc_read(const struct cyclecounter *cc) 125 + static u64 xgbe_cc_read(const struct cyclecounter *cc) 126 126 { 127 127 struct xgbe_prv_data *pdata = container_of(cc, 128 128 struct xgbe_prv_data,
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 15223 15223 } 15224 15224 15225 15225 /* Read the PHC */ 15226 - static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc) 15226 + static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc) 15227 15227 { 15228 15228 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter); 15229 15229 int port = BP_PORT(bp);
+2 -3
drivers/net/ethernet/ec_bhf.c
··· 253 253 if (!netif_running(priv->net_dev)) 254 254 return HRTIMER_NORESTART; 255 255 256 - hrtimer_forward_now(timer, ktime_set(0, polling_frequency)); 256 + hrtimer_forward_now(timer, polling_frequency); 257 257 return HRTIMER_RESTART; 258 258 } 259 259 ··· 427 427 428 428 hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 429 429 priv->hrtimer.function = ec_bhf_timer_fun; 430 - hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), 431 - HRTIMER_MODE_REL); 430 + hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL); 432 431 433 432 return 0; 434 433
+1 -1
drivers/net/ethernet/freescale/fec_ptp.c
··· 230 230 * cyclecounter structure used to construct a ns counter from the 231 231 * arbitrary fixed point registers 232 232 */ 233 - static cycle_t fec_ptp_read(const struct cyclecounter *cc) 233 + static u64 fec_ptp_read(const struct cyclecounter *cc) 234 234 { 235 235 struct fec_enet_private *fep = 236 236 container_of(cc, struct fec_enet_private, cc);
+9 -9
drivers/net/ethernet/intel/e1000e/netdev.c
··· 4305 4305 /** 4306 4306 * e1000e_sanitize_systim - sanitize raw cycle counter reads 4307 4307 * @hw: pointer to the HW structure 4308 - * @systim: cycle_t value read, sanitized and returned 4308 + * @systim: time value read, sanitized and returned 4309 4309 * 4310 4310 * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: 4311 4311 * check to see that the time is incrementing at a reasonable 4312 4312 * rate and is a multiple of incvalue. 4313 4313 **/ 4314 - static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim) 4314 + static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim) 4315 4315 { 4316 4316 u64 time_delta, rem, temp; 4317 - cycle_t systim_next; 4317 + u64 systim_next; 4318 4318 u32 incvalue; 4319 4319 int i; 4320 4320 4321 4321 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; 4322 4322 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { 4323 4323 /* latch SYSTIMH on read of SYSTIML */ 4324 - systim_next = (cycle_t)er32(SYSTIML); 4325 - systim_next |= (cycle_t)er32(SYSTIMH) << 32; 4324 + systim_next = (u64)er32(SYSTIML); 4325 + systim_next |= (u64)er32(SYSTIMH) << 32; 4326 4326 4327 4327 time_delta = systim_next - systim; 4328 4328 temp = time_delta; ··· 4342 4342 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) 4343 4343 * @cc: cyclecounter structure 4344 4344 **/ 4345 - static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) 4345 + static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) 4346 4346 { 4347 4347 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, 4348 4348 cc); 4349 4349 struct e1000_hw *hw = &adapter->hw; 4350 4350 u32 systimel, systimeh; 4351 - cycle_t systim; 4351 + u64 systim; 4352 4352 /* SYSTIMH latching upon SYSTIML read does not work well. 4353 4353 * This means that if SYSTIML overflows after we read it but before 4354 4354 * we read SYSTIMH, the value of SYSTIMH has been incremented and we ··· 4368 4368 systimel = systimel_2; 4369 4369 } 4370 4370 } 4371 - systim = (cycle_t)systimel; 4372 - systim |= (cycle_t)systimeh << 32; 4371 + systim = (u64)systimel; 4372 + systim |= (u64)systimeh << 32; 4373 4373 4374 4374 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) 4375 4375 systim = e1000e_sanitize_systim(hw, systim);
+2 -2
drivers/net/ethernet/intel/e1000e/ptp.c
··· 127 127 unsigned long flags; 128 128 int i; 129 129 u32 tsync_ctrl; 130 - cycle_t dev_cycles; 131 - cycle_t sys_cycles; 130 + u64 dev_cycles; 131 + u64 sys_cycles; 132 132 133 133 tsync_ctrl = er32(TSYNCTXCTL); 134 134 tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC |
+2 -2
drivers/net/ethernet/intel/igb/igb_ptp.c
··· 77 77 static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); 78 78 79 79 /* SYSTIM read access for the 82576 */ 80 - static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) 80 + static u64 igb_ptp_read_82576(const struct cyclecounter *cc) 81 81 { 82 82 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 83 83 struct e1000_hw *hw = &igb->hw; ··· 94 94 } 95 95 96 96 /* SYSTIM read access for the 82580 */ 97 - static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) 97 + static u64 igb_ptp_read_82580(const struct cyclecounter *cc) 98 98 { 99 99 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 100 100 struct e1000_hw *hw = &igb->hw;
+2 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
··· 245 245 * result of SYSTIME is 32bits of "billions of cycles" and 32 bits of 246 246 * "cycles", rather than seconds and nanoseconds. 247 247 */ 248 - static cycle_t ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) 248 + static u64 ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) 249 249 { 250 250 struct ixgbe_adapter *adapter = 251 251 container_of(hw_cc, struct ixgbe_adapter, hw_cc); ··· 282 282 * cyclecounter structure used to construct a ns counter from the 283 283 * arbitrary fixed point registers 284 284 */ 285 - static cycle_t ixgbe_ptp_read_82599(const struct cyclecounter *cc) 285 + static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc) 286 286 { 287 287 struct ixgbe_adapter *adapter = 288 288 container_of(cc, struct ixgbe_adapter, hw_cc);
+1 -1
drivers/net/ethernet/marvell/mvpp2.c
··· 4913 4913 4914 4914 if (!port_pcpu->timer_scheduled) { 4915 4915 port_pcpu->timer_scheduled = true; 4916 - interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS); 4916 + interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS; 4917 4917 hrtimer_start(&port_pcpu->tx_done_timer, interval, 4918 4918 HRTIMER_MODE_REL_PINNED); 4919 4919 }
+1 -1
drivers/net/ethernet/mellanox/mlx4/en_clock.c
··· 38 38 39 39 /* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) 40 40 */ 41 - static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) 41 + static u64 mlx4_en_read_clock(const struct cyclecounter *tc) 42 42 { 43 43 struct mlx4_en_dev *mdev = 44 44 container_of(tc, struct mlx4_en_dev, cycles);
+2 -2
drivers/net/ethernet/mellanox/mlx4/main.c
··· 1823 1823 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1824 1824 } 1825 1825 1826 - cycle_t mlx4_read_clock(struct mlx4_dev *dev) 1826 + u64 mlx4_read_clock(struct mlx4_dev *dev) 1827 1827 { 1828 1828 u32 clockhi, clocklo, clockhi1; 1829 - cycle_t cycles; 1829 + u64 cycles; 1830 1830 int i; 1831 1831 struct mlx4_priv *priv = mlx4_priv(dev); 1832 1832
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
··· 49 49 hwts->hwtstamp = ns_to_ktime(nsec); 50 50 } 51 51 52 - static cycle_t mlx5e_read_internal_timer(const struct cyclecounter *cc) 52 + static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc) 53 53 { 54 54 struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp, 55 55 cycles);
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 557 557 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 558 558 } 559 559 560 - cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev) 560 + u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev) 561 561 { 562 562 u32 timer_h, timer_h1, timer_l; 563 563 ··· 567 567 if (timer_h != timer_h1) /* wrap around */ 568 568 timer_l = ioread32be(&dev->iseg->internal_timer_l); 569 569 570 - return (cycle_t)timer_l | (cycle_t)timer_h1 << 32; 570 + return (u64)timer_l | (u64)timer_h1 << 32; 571 571 } 572 572 573 573 static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 106 106 int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, 107 107 u32 element_id); 108 108 int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); 109 - cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev); 109 + u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev); 110 110 u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); 111 111 struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); 112 112 void mlx5_cq_tasklet_cb(unsigned long data);
+1 -1
drivers/net/ethernet/ti/cpts.c
··· 121 121 return type == match ? 0 : -1; 122 122 } 123 123 124 - static cycle_t cpts_systim_read(const struct cyclecounter *cc) 124 + static u64 cpts_systim_read(const struct cyclecounter *cc) 125 125 { 126 126 u64 val = 0; 127 127 struct cpts_event *event;
+2 -2
drivers/net/ethernet/tile/tilegx.c
··· 751 751 &info->mpipe[instance].tx_wake[priv->echannel]; 752 752 753 753 hrtimer_start(&tx_wake->timer, 754 - ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), 754 + TX_TIMER_DELAY_USEC * 1000UL, 755 755 HRTIMER_MODE_REL_PINNED); 756 756 } 757 757 ··· 770 770 771 771 if (!info->egress_timer_scheduled) { 772 772 hrtimer_start(&info->egress_timer, 773 - ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL), 773 + EGRESS_TIMER_DELAY_USEC * 1000UL, 774 774 HRTIMER_MODE_REL_PINNED); 775 775 info->egress_timer_scheduled = true; 776 776 }
+4 -5
drivers/net/ieee802154/at86rf230.c
··· 510 510 case STATE_TRX_OFF: 511 511 switch (ctx->to_state) { 512 512 case STATE_RX_AACK_ON: 513 - tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC); 513 + tim = c->t_off_to_aack * NSEC_PER_USEC; 514 514 /* state change from TRX_OFF to RX_AACK_ON to do a 515 515 * calibration, we need to reset the timeout for the 516 516 * next one. ··· 519 519 goto change; 520 520 case STATE_TX_ARET_ON: 521 521 case STATE_TX_ON: 522 - tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC); 522 + tim = c->t_off_to_tx_on * NSEC_PER_USEC; 523 523 /* state change from TRX_OFF to TX_ON or ARET_ON to do 524 524 * a calibration, we need to reset the timeout for the 525 525 * next one. ··· 539 539 * to TX_ON or TRX_OFF. 540 540 */ 541 541 if (!force) { 542 - tim = ktime_set(0, (c->t_frame + c->t_p_ack) * 543 - NSEC_PER_USEC); 542 + tim = (c->t_frame + c->t_p_ack) * NSEC_PER_USEC; 544 543 goto change; 545 544 } 546 545 break; ··· 551 552 case STATE_P_ON: 552 553 switch (ctx->to_state) { 553 554 case STATE_TRX_OFF: 554 - tim = ktime_set(0, c->t_reset_to_off * NSEC_PER_USEC); 555 + tim = c->t_reset_to_off * NSEC_PER_USEC; 555 556 goto change; 556 557 default: 557 558 break;
+1 -1
drivers/net/usb/cdc_ncm.c
··· 1282 1282 /* start timer, if not already started */ 1283 1283 if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop))) 1284 1284 hrtimer_start(&ctx->tx_timer, 1285 - ktime_set(0, ctx->timer_interval), 1285 + ctx->timer_interval, 1286 1286 HRTIMER_MODE_REL); 1287 1287 } 1288 1288
+2 -2
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
··· 177 177 if (rt2800usb_txstatus_pending(rt2x00dev)) { 178 178 /* Read register after 1 ms */ 179 179 hrtimer_start(&rt2x00dev->txstatus_timer, 180 - ktime_set(0, TXSTATUS_READ_INTERVAL), 180 + TXSTATUS_READ_INTERVAL, 181 181 HRTIMER_MODE_REL); 182 182 return false; 183 183 } ··· 204 204 205 205 /* Read TX_STA_FIFO register after 2 ms */ 206 206 hrtimer_start(&rt2x00dev->txstatus_timer, 207 - ktime_set(0, 2*TXSTATUS_READ_INTERVAL), 207 + 2 * TXSTATUS_READ_INTERVAL, 208 208 HRTIMER_MODE_REL); 209 209 } 210 210
+1 -1
drivers/pci/quirks.c
··· 3044 3044 static ktime_t fixup_debug_start(struct pci_dev *dev, 3045 3045 void (*fn)(struct pci_dev *dev)) 3046 3046 { 3047 - ktime_t calltime = ktime_set(0, 0); 3047 + ktime_t calltime = 0; 3048 3048 3049 3049 dev_dbg(&dev->dev, "calling %pF\n", fn); 3050 3050 if (initcall_debug) {
+1 -1
drivers/platform/x86/msi-wmi.c
··· 283 283 if (err) 284 284 goto err_free_keymap; 285 285 286 - last_pressed = ktime_set(0, 0); 286 + last_pressed = 0; 287 287 288 288 return 0; 289 289
+1 -1
drivers/power/reset/ltc2952-poweroff.c
··· 169 169 170 170 static void ltc2952_poweroff_default(struct ltc2952_poweroff *data) 171 171 { 172 - data->wde_interval = ktime_set(0, 300L*1E6L); 172 + data->wde_interval = 300L * 1E6L; 173 173 data->trigger_delay = ktime_set(2, 500L*1E6L); 174 174 175 175 hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+8 -8
drivers/rtc/interface.c
··· 363 363 rtc_timer_remove(rtc, &rtc->aie_timer); 364 364 365 365 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); 366 - rtc->aie_timer.period = ktime_set(0, 0); 366 + rtc->aie_timer.period = 0; 367 367 if (alarm->enabled) 368 368 err = rtc_timer_enqueue(rtc, &rtc->aie_timer); 369 369 ··· 391 391 return err; 392 392 393 393 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); 394 - rtc->aie_timer.period = ktime_set(0, 0); 394 + rtc->aie_timer.period = 0; 395 395 396 396 /* Alarm has to be enabled & in the future for us to enqueue it */ 397 - if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 < 398 - rtc->aie_timer.node.expires.tv64)) { 397 + if (alarm->enabled && (rtc_tm_to_ktime(now) < 398 + rtc->aie_timer.node.expires)) { 399 399 400 400 rtc->aie_timer.enabled = 1; 401 401 timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); ··· 554 554 int count; 555 555 rtc = container_of(timer, struct rtc_device, pie_timer); 556 556 557 - period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq); 557 + period = NSEC_PER_SEC / rtc->irq_freq; 558 558 count = hrtimer_forward_now(timer, period); 559 559 560 560 rtc_handle_legacy_irq(rtc, count, RTC_PF); ··· 665 665 return -1; 666 666 667 667 if (enabled) { 668 - ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq); 668 + ktime_t period = NSEC_PER_SEC / rtc->irq_freq; 669 669 670 670 hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL); 671 671 } ··· 766 766 767 767 /* Skip over expired timers */ 768 768 while (next) { 769 - if (next->expires.tv64 >= now.tv64) 769 + if (next->expires >= now) 770 770 break; 771 771 next = timerqueue_iterate_next(next); 772 772 } ··· 858 858 __rtc_read_time(rtc, &tm); 859 859 now = rtc_tm_to_ktime(tm); 860 860 while ((next = timerqueue_getnext(&rtc->timerqueue))) { 861 - if (next->expires.tv64 > now.tv64) 861 + if (next->expires > now) 862 862 break; 863 863 864 864 /* expire timer */
+2 -2
drivers/s390/crypto/ap_bus.c
··· 333 333 case AP_WAIT_TIMEOUT: 334 334 spin_lock_bh(&ap_poll_timer_lock); 335 335 if (!hrtimer_is_queued(&ap_poll_timer)) { 336 - hr_time = ktime_set(0, poll_timeout); 336 + hr_time = poll_timeout; 337 337 hrtimer_forward_now(&ap_poll_timer, hr_time); 338 338 hrtimer_restart(&ap_poll_timer); 339 339 } ··· 860 860 time > 120000000000ULL) 861 861 return -EINVAL; 862 862 poll_timeout = time; 863 - hr_time = ktime_set(0, poll_timeout); 863 + hr_time = poll_timeout; 864 864 865 865 spin_lock_bh(&ap_poll_timer_lock); 866 866 hrtimer_cancel(&ap_poll_timer);
+1 -1
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
··· 1694 1694 if (!vscsi->rsp_q_timer.started) { 1695 1695 if (vscsi->rsp_q_timer.timer_pops < 1696 1696 MAX_TIMER_POPS) { 1697 - kt = ktime_set(0, WAIT_NANO_SECONDS); 1697 + kt = WAIT_NANO_SECONDS; 1698 1698 } else { 1699 1699 /* 1700 1700 * slide the timeslice if the maximum
+1 -1
drivers/scsi/scsi_debug.c
··· 4085 4085 jiffies_to_timespec(delta_jiff, &ts); 4086 4086 kt = ktime_set(ts.tv_sec, ts.tv_nsec); 4087 4087 } else 4088 - kt = ktime_set(0, sdebug_ndelay); 4088 + kt = sdebug_ndelay; 4089 4089 if (NULL == sd_dp) { 4090 4090 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC); 4091 4091 if (NULL == sd_dp)
+2 -2
drivers/scsi/ufs/ufshcd.c
··· 930 930 if (!hba->outstanding_reqs && scaling->is_busy_started) { 931 931 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), 932 932 scaling->busy_start_t)); 933 - scaling->busy_start_t = ktime_set(0, 0); 933 + scaling->busy_start_t = 0; 934 934 scaling->is_busy_started = false; 935 935 } 936 936 } ··· 6661 6661 scaling->busy_start_t = ktime_get(); 6662 6662 scaling->is_busy_started = true; 6663 6663 } else { 6664 - scaling->busy_start_t = ktime_set(0, 0); 6664 + scaling->busy_start_t = 0; 6665 6665 scaling->is_busy_started = false; 6666 6666 } 6667 6667 spin_unlock_irqrestore(hba->host->host_lock, flags);
+7 -7
drivers/usb/chipidea/otg_fsm.c
··· 234 234 ktime_set(timer_sec, timer_nsec)); 235 235 ci->enabled_otg_timer_bits |= (1 << t); 236 236 if ((ci->next_otg_timer == NUM_OTG_FSM_TIMERS) || 237 - (ci->hr_timeouts[ci->next_otg_timer].tv64 > 238 - ci->hr_timeouts[t].tv64)) { 237 + (ci->hr_timeouts[ci->next_otg_timer] > 238 + ci->hr_timeouts[t])) { 239 239 ci->next_otg_timer = t; 240 240 hrtimer_start_range_ns(&ci->otg_fsm_hrtimer, 241 241 ci->hr_timeouts[t], NSEC_PER_MSEC, ··· 269 269 for_each_set_bit(cur_timer, &enabled_timer_bits, 270 270 NUM_OTG_FSM_TIMERS) { 271 271 if ((next_timer == NUM_OTG_FSM_TIMERS) || 272 - (ci->hr_timeouts[next_timer].tv64 < 273 - ci->hr_timeouts[cur_timer].tv64)) 272 + (ci->hr_timeouts[next_timer] < 273 + ci->hr_timeouts[cur_timer])) 274 274 next_timer = cur_timer; 275 275 } 276 276 } ··· 397 397 398 398 now = ktime_get(); 399 399 for_each_set_bit(cur_timer, &enabled_timer_bits, NUM_OTG_FSM_TIMERS) { 400 - if (now.tv64 >= ci->hr_timeouts[cur_timer].tv64) { 400 + if (now >= ci->hr_timeouts[cur_timer]) { 401 401 ci->enabled_otg_timer_bits &= ~(1 << cur_timer); 402 402 if (otg_timer_handlers[cur_timer]) 403 403 ret = otg_timer_handlers[cur_timer](ci); 404 404 } else { 405 405 if ((next_timer == NUM_OTG_FSM_TIMERS) || 406 - (ci->hr_timeouts[cur_timer].tv64 < 407 - ci->hr_timeouts[next_timer].tv64)) 406 + (ci->hr_timeouts[cur_timer] < 407 + ci->hr_timeouts[next_timer])) 408 408 next_timer = cur_timer; 409 409 } 410 410 }
+1 -2
drivers/usb/gadget/function/f_ncm.c
··· 1113 1113 } 1114 1114 1115 1115 /* Delay the timer. */ 1116 - hrtimer_start(&ncm->task_timer, 1117 - ktime_set(0, TX_TIMEOUT_NSECS), 1116 + hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS, 1118 1117 HRTIMER_MODE_REL); 1119 1118 1120 1119 /* Add the datagram position entries */
+2 -3
drivers/usb/host/ehci-timer.c
··· 88 88 ktime_t *timeout = &ehci->hr_timeouts[event]; 89 89 90 90 if (resched) 91 - *timeout = ktime_add(ktime_get(), 92 - ktime_set(0, event_delays_ns[event])); 91 + *timeout = ktime_add(ktime_get(), event_delays_ns[event]); 93 92 ehci->enabled_hrtimer_events |= (1 << event); 94 93 95 94 /* Track only the lowest-numbered pending event */ ··· 424 425 */ 425 426 now = ktime_get(); 426 427 for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) { 427 - if (now.tv64 >= ehci->hr_timeouts[e].tv64) 428 + if (now >= ehci->hr_timeouts[e]) 428 429 event_handlers[e](ehci); 429 430 else 430 431 ehci_enable_event(ehci, e, false);
+2 -3
drivers/usb/host/fotg210-hcd.c
··· 1080 1080 ktime_t *timeout = &fotg210->hr_timeouts[event]; 1081 1081 1082 1082 if (resched) 1083 - *timeout = ktime_add(ktime_get(), 1084 - ktime_set(0, event_delays_ns[event])); 1083 + *timeout = ktime_add(ktime_get(), event_delays_ns[event]); 1085 1084 fotg210->enabled_hrtimer_events |= (1 << event); 1086 1085 1087 1086 /* Track only the lowest-numbered pending event */ ··· 1380 1381 */ 1381 1382 now = ktime_get(); 1382 1383 for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) { 1383 - if (now.tv64 >= fotg210->hr_timeouts[e].tv64) 1384 + if (now >= fotg210->hr_timeouts[e]) 1384 1385 event_handlers[e](fotg210); 1385 1386 else 1386 1387 fotg210_enable_event(fotg210, e, false);
+4 -5
drivers/usb/musb/musb_cppi41.c
··· 197 197 if (!list_empty(&controller->early_tx_list) && 198 198 !hrtimer_is_queued(&controller->early_tx)) { 199 199 ret = HRTIMER_RESTART; 200 - hrtimer_forward_now(&controller->early_tx, 201 - ktime_set(0, 20 * NSEC_PER_USEC)); 200 + hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC); 202 201 } 203 202 204 203 spin_unlock_irqrestore(&musb->lock, flags); ··· 279 280 unsigned long usecs = cppi41_channel->total_len / 10; 280 281 281 282 hrtimer_start_range_ns(&controller->early_tx, 282 - ktime_set(0, usecs * NSEC_PER_USEC), 283 - 20 * NSEC_PER_USEC, 284 - HRTIMER_MODE_REL); 283 + usecs * NSEC_PER_USEC, 284 + 20 * NSEC_PER_USEC, 285 + HRTIMER_MODE_REL); 285 286 } 286 287 287 288 out:
+2 -2
fs/aio.c
··· 1285 1285 struct io_event __user *event, 1286 1286 struct timespec __user *timeout) 1287 1287 { 1288 - ktime_t until = { .tv64 = KTIME_MAX }; 1288 + ktime_t until = KTIME_MAX; 1289 1289 long ret = 0; 1290 1290 1291 1291 if (timeout) { ··· 1311 1311 * the ringbuffer empty. So in practice we should be ok, but it's 1312 1312 * something to be aware of when touching this code. 1313 1313 */ 1314 - if (until.tv64 == 0) 1314 + if (until == 0) 1315 1315 aio_read_events(ctx, min_nr, nr, event, &ret); 1316 1316 else 1317 1317 wait_event_interruptible_hrtimeout(ctx->wait,
+2 -3
fs/dlm/lock.c
··· 1395 1395 void dlm_scan_waiters(struct dlm_ls *ls) 1396 1396 { 1397 1397 struct dlm_lkb *lkb; 1398 - ktime_t zero = ktime_set(0, 0); 1399 1398 s64 us; 1400 1399 s64 debug_maxus = 0; 1401 1400 u32 debug_scanned = 0; ··· 1408 1409 mutex_lock(&ls->ls_waiters_mutex); 1409 1410 1410 1411 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { 1411 - if (ktime_equal(lkb->lkb_wait_time, zero)) 1412 + if (!lkb->lkb_wait_time) 1412 1413 continue; 1413 1414 1414 1415 debug_scanned++; ··· 1418 1419 if (us < dlm_config.ci_waitwarn_us) 1419 1420 continue; 1420 1421 1421 - lkb->lkb_wait_time = zero; 1422 + lkb->lkb_wait_time = 0; 1422 1423 1423 1424 debug_expired++; 1424 1425 if (us > debug_maxus)
+1 -1
fs/gfs2/glock.c
··· 695 695 gl->gl_target = LM_ST_UNLOCKED; 696 696 gl->gl_demote_state = LM_ST_EXCLUSIVE; 697 697 gl->gl_ops = glops; 698 - gl->gl_dstamp = ktime_set(0, 0); 698 + gl->gl_dstamp = 0; 699 699 preempt_disable(); 700 700 /* We use the global stats to estimate the initial per-glock stats */ 701 701 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
+1 -2
fs/nfs/flexfilelayout/flexfilelayout.c
··· 619 619 struct nfs4_ff_layoutstat *layoutstat, 620 620 ktime_t now) 621 621 { 622 - static const ktime_t notime = {0}; 623 622 s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL; 624 623 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout); 625 624 626 625 nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now); 627 - if (ktime_equal(mirror->start_time, notime)) 626 + if (!mirror->start_time) 628 627 mirror->start_time = now; 629 628 if (mirror->report_interval != 0) 630 629 report_interval = (s64)mirror->report_interval * 1000LL;
+1 -1
fs/ocfs2/cluster/heartbeat.c
··· 1250 1250 1251 1251 mlog(ML_HEARTBEAT, 1252 1252 "start = %lld, end = %lld, msec = %u, ret = %d\n", 1253 - before_hb.tv64, after_hb.tv64, elapsed_msec, ret); 1253 + before_hb, after_hb, elapsed_msec, ret); 1254 1254 1255 1255 if (!kthread_should_stop() && 1256 1256 elapsed_msec < reg->hr_timeout_ms) {
+13 -13
fs/timerfd.c
··· 55 55 /* 56 56 * This gets called when the timer event triggers. We set the "expired" 57 57 * flag, but we do not re-arm the timer (in case it's necessary, 58 - * tintv.tv64 != 0) until the timer is accessed. 58 + * tintv != 0) until the timer is accessed. 59 59 */ 60 60 static void timerfd_triggered(struct timerfd_ctx *ctx) 61 61 { ··· 93 93 */ 94 94 void timerfd_clock_was_set(void) 95 95 { 96 - ktime_t moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); 96 + ktime_t moffs = ktime_mono_to_real(0); 97 97 struct timerfd_ctx *ctx; 98 98 unsigned long flags; 99 99 ··· 102 102 if (!ctx->might_cancel) 103 103 continue; 104 104 spin_lock_irqsave(&ctx->wqh.lock, flags); 105 - if (ctx->moffs.tv64 != moffs.tv64) { 106 - ctx->moffs.tv64 = KTIME_MAX; 105 + if (ctx->moffs != moffs) { 106 + ctx->moffs = KTIME_MAX; 107 107 ctx->ticks++; 108 108 wake_up_locked(&ctx->wqh); 109 109 } ··· 124 124 125 125 static bool timerfd_canceled(struct timerfd_ctx *ctx) 126 126 { 127 - if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX) 127 + if (!ctx->might_cancel || ctx->moffs != KTIME_MAX) 128 128 return false; 129 - ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); 129 + ctx->moffs = ktime_mono_to_real(0); 130 130 return true; 131 131 } 132 132 ··· 155 155 else 156 156 remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr); 157 157 158 - return remaining.tv64 < 0 ? ktime_set(0, 0): remaining; 158 + return remaining < 0 ? 0: remaining; 159 159 } 160 160 161 161 static int timerfd_setup(struct timerfd_ctx *ctx, int flags, ··· 184 184 ctx->t.tmr.function = timerfd_tmrproc; 185 185 } 186 186 187 - if (texp.tv64 != 0) { 187 + if (texp != 0) { 188 188 if (isalarm(ctx)) { 189 189 if (flags & TFD_TIMER_ABSTIME) 190 190 alarm_start(&ctx->t.alarm, texp); ··· 261 261 if (ctx->ticks) { 262 262 ticks = ctx->ticks; 263 263 264 - if (ctx->expired && ctx->tintv.tv64) { 264 + if (ctx->expired && ctx->tintv) { 265 265 /* 266 - * If tintv.tv64 != 0, this is a periodic timer that 266 + * If tintv != 0, this is a periodic timer that 267 267 * needs to be re-armed. We avoid doing it in the timer 268 268 * callback to avoid DoS attacks specifying a very 269 269 * short timer period. ··· 410 410 else 411 411 hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS); 412 412 413 - ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); 413 + ctx->moffs = ktime_mono_to_real(0); 414 414 415 415 ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, 416 416 O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS)); ··· 469 469 * We do not update "ticks" and "expired" since the timer will be 470 470 * re-programmed again in the following timerfd_setup() call. 471 471 */ 472 - if (ctx->expired && ctx->tintv.tv64) { 472 + if (ctx->expired && ctx->tintv) { 473 473 if (isalarm(ctx)) 474 474 alarm_forward_now(&ctx->t.alarm, ctx->tintv); 475 475 else ··· 499 499 ctx = f.file->private_data; 500 500 501 501 spin_lock_irq(&ctx->wqh.lock); 502 - if (ctx->expired && ctx->tintv.tv64) { 502 + if (ctx->expired && ctx->tintv) { 503 503 ctx->expired = 0; 504 504 505 505 if (isalarm(ctx)) {
+2 -2
include/kvm/arm_arch_timer.h
··· 25 25 26 26 struct arch_timer_kvm { 27 27 /* Virtual offset */ 28 - cycle_t cntvoff; 28 + u64 cntvoff; 29 29 }; 30 30 31 31 struct arch_timer_cpu { 32 32 /* Registers: control register, timer value */ 33 33 u32 cntv_ctl; /* Saved/restored */ 34 - cycle_t cntv_cval; /* Saved/restored */ 34 + u64 cntv_cval; /* Saved/restored */ 35 35 36 36 /* 37 37 * Anything that is not used directly from assembly code goes
+11 -11
include/linux/clocksource.h
··· 75 75 * structure. 76 76 */ 77 77 struct clocksource { 78 - cycle_t (*read)(struct clocksource *cs); 79 - cycle_t mask; 78 + u64 (*read)(struct clocksource *cs); 79 + u64 mask; 80 80 u32 mult; 81 81 u32 shift; 82 82 u64 max_idle_ns; ··· 98 98 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG 99 99 /* Watchdog related data, used by the framework */ 100 100 struct list_head wd_list; 101 - cycle_t cs_last; 102 - cycle_t wd_last; 101 + u64 cs_last; 102 + u64 wd_last; 103 103 #endif 104 104 struct module *owner; 105 105 }; ··· 117 117 #define CLOCK_SOURCE_RESELECT 0x100 118 118 119 119 /* simplify initialization of mask field */ 120 - #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) 120 + #define CLOCKSOURCE_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) 121 121 122 122 static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) 123 123 { ··· 176 176 * 177 177 * XXX - This could use some mult_lxl_ll() asm optimization 178 178 */ 179 - static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift) 179 + static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift) 180 180 { 181 181 return ((u64) cycles * mult) >> shift; 182 182 } ··· 236 236 237 237 extern int timekeeping_notify(struct clocksource *clock); 238 238 239 - extern cycle_t clocksource_mmio_readl_up(struct clocksource *); 240 - extern cycle_t clocksource_mmio_readl_down(struct clocksource *); 241 - extern cycle_t clocksource_mmio_readw_up(struct clocksource *); 242 - extern cycle_t clocksource_mmio_readw_down(struct clocksource *); 239 + extern u64 clocksource_mmio_readl_up(struct clocksource *); 240 + extern u64 clocksource_mmio_readl_down(struct clocksource *); 241 + extern u64 clocksource_mmio_readw_up(struct clocksource *); 242 + extern u64 clocksource_mmio_readw_down(struct clocksource *); 243 243 244 244 extern int clocksource_mmio_init(void __iomem *, const char *, 245 - unsigned long, int, unsigned, cycle_t (*)(struct clocksource *)); 245 + unsigned long, int, unsigned, u64 (*)(struct clocksource *)); 246 246 247 247 extern int clocksource_i8253_init(void); 248 248
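With cycle_t gone, every clocksource ->read() callback returns a plain u64, as the header change above shows. The following is only a sketch of what a converted driver registration looks like, assuming a hypothetical memory-mapped free-running counter; the name, base pointer and 24 MHz rate are placeholders, not from this commit:

/* Hypothetical MMIO counter exposed as a clocksource with the u64 read() hook. */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/io.h>

static void __iomem *counter_base;	/* assumed to be ioremap()ed elsewhere */

static u64 example_cs_read(struct clocksource *cs)
{
	return (u64)readl_relaxed(counter_base);
}

static struct clocksource example_cs = {
	.name	= "example-counter",
	.rating	= 200,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_cs_init(void)
{
	return clocksource_register_hz(&example_cs, 24000000);
}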
+1 -1
include/linux/dw_apb_timer.h
··· 50 50 unsigned long freq); 51 51 void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); 52 52 void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); 53 - cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); 53 + u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); 54 54 55 55 #endif /* __DW_APB_TIMER_H__ */
+2 -2
include/linux/futex.h
··· 1 1 #ifndef _LINUX_FUTEX_H 2 2 #define _LINUX_FUTEX_H 3 3 4 + #include <linux/ktime.h> 4 5 #include <uapi/linux/futex.h> 5 6 6 7 struct inode; 7 8 struct mm_struct; 8 9 struct task_struct; 9 - union ktime; 10 10 11 - long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, 11 + long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, 12 12 u32 __user *uaddr2, u32 val2, u32 val3); 13 13 14 14 extern int
+6 -6
include/linux/hrtimer.h
··· 228 228 229 229 static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) 230 230 { 231 - timer->node.expires.tv64 = tv64; 232 - timer->_softexpires.tv64 = tv64; 231 + timer->node.expires = tv64; 232 + timer->_softexpires = tv64; 233 233 } 234 234 235 235 static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) ··· 256 256 257 257 static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) 258 258 { 259 - return timer->node.expires.tv64; 259 + return timer->node.expires; 260 260 } 261 261 static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) 262 262 { 263 - return timer->_softexpires.tv64; 263 + return timer->_softexpires; 264 264 } 265 265 266 266 static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) ··· 297 297 * this resolution values. 298 298 */ 299 299 # define HIGH_RES_NSEC 1 300 - # define KTIME_HIGH_RES (ktime_t) { .tv64 = HIGH_RES_NSEC } 300 + # define KTIME_HIGH_RES (HIGH_RES_NSEC) 301 301 # define MONOTONIC_RES_NSEC HIGH_RES_NSEC 302 302 # define KTIME_MONOTONIC_RES KTIME_HIGH_RES 303 303 ··· 333 333 * hrtimer_start_range_ns() to prevent short timeouts. 334 334 */ 335 335 if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel) 336 - rem.tv64 -= hrtimer_resolution; 336 + rem -= hrtimer_resolution; 337 337 return rem; 338 338 } 339 339
+4 -4
include/linux/irqchip/mips-gic.h
··· 259 259 unsigned long gic_addrspace_size, unsigned int cpu_vec, 260 260 unsigned int irqbase); 261 261 extern void gic_clocksource_init(unsigned int); 262 - extern cycle_t gic_read_count(void); 262 + extern u64 gic_read_count(void); 263 263 extern unsigned int gic_get_count_width(void); 264 - extern cycle_t gic_read_compare(void); 265 - extern void gic_write_compare(cycle_t cnt); 266 - extern void gic_write_cpu_compare(cycle_t cnt, int cpu); 264 + extern u64 gic_read_compare(void); 265 + extern void gic_write_compare(u64 cnt); 266 + extern void gic_write_cpu_compare(u64 cnt, int cpu); 267 267 extern void gic_start_count(void); 268 268 extern void gic_stop_count(void); 269 269 extern int gic_get_c0_compare_int(void);
+22 -59
include/linux/ktime.h
··· 24 24 #include <linux/time.h> 25 25 #include <linux/jiffies.h> 26 26 27 - /* 28 - * ktime_t: 29 - * 30 - * A single 64-bit variable is used to store the hrtimers 31 - * internal representation of time values in scalar nanoseconds. The 32 - * design plays out best on 64-bit CPUs, where most conversions are 33 - * NOPs and most arithmetic ktime_t operations are plain arithmetic 34 - * operations. 35 - * 36 - */ 37 - union ktime { 38 - s64 tv64; 39 - }; 40 - 41 - typedef union ktime ktime_t; /* Kill this */ 27 + /* Nanosecond scalar representation for kernel time values */ 28 + typedef s64 ktime_t; 42 29 43 30 /** 44 31 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value ··· 37 50 static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) 38 51 { 39 52 if (unlikely(secs >= KTIME_SEC_MAX)) 40 - return (ktime_t){ .tv64 = KTIME_MAX }; 53 + return KTIME_MAX; 41 54 42 - return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs }; 55 + return secs * NSEC_PER_SEC + (s64)nsecs; 43 56 } 44 57 45 58 /* Subtract two ktime_t variables. rem = lhs -rhs: */ 46 - #define ktime_sub(lhs, rhs) \ 47 - ({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; }) 59 + #define ktime_sub(lhs, rhs) ((lhs) - (rhs)) 48 60 49 61 /* Add two ktime_t variables. res = lhs + rhs: */ 50 - #define ktime_add(lhs, rhs) \ 51 - ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; }) 62 + #define ktime_add(lhs, rhs) ((lhs) + (rhs)) 52 63 53 64 /* 54 65 * Same as ktime_add(), but avoids undefined behaviour on overflow; however, 55 66 * this means that you must check the result for overflow yourself. 56 67 */ 57 - #define ktime_add_unsafe(lhs, rhs) \ 58 - ({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; }) 68 + #define ktime_add_unsafe(lhs, rhs) ((u64) (lhs) + (rhs)) 59 69 60 70 /* 61 71 * Add a ktime_t variable and a scalar nanosecond value. 62 72 * res = kt + nsval: 63 73 */ 64 - #define ktime_add_ns(kt, nsval) \ 65 - ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; }) 74 + #define ktime_add_ns(kt, nsval) ((kt) + (nsval)) 66 75 67 76 /* 68 77 * Subtract a scalar nanosecod from a ktime_t variable 69 78 * res = kt - nsval: 70 79 */ 71 - #define ktime_sub_ns(kt, nsval) \ 72 - ({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; }) 80 + #define ktime_sub_ns(kt, nsval) ((kt) - (nsval)) 73 81 74 82 /* convert a timespec to ktime_t format: */ 75 83 static inline ktime_t timespec_to_ktime(struct timespec ts) ··· 85 103 } 86 104 87 105 /* Map the ktime_t to timespec conversion to ns_to_timespec function */ 88 - #define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) 106 + #define ktime_to_timespec(kt) ns_to_timespec((kt)) 89 107 90 108 /* Map the ktime_t to timespec conversion to ns_to_timespec function */ 91 - #define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64) 109 + #define ktime_to_timespec64(kt) ns_to_timespec64((kt)) 92 110 93 111 /* Map the ktime_t to timeval conversion to ns_to_timeval function */ 94 - #define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) 112 + #define ktime_to_timeval(kt) ns_to_timeval((kt)) 95 113 96 114 /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ 97 - #define ktime_to_ns(kt) ((kt).tv64) 98 - 99 - 100 - /** 101 - * ktime_equal - Compares two ktime_t variables to see if they are equal 102 - * @cmp1: comparable1 103 - * @cmp2: comparable2 104 - * 105 - * Compare two ktime_t variables. 106 - * 107 - * Return: 1 if equal. 
108 - */ 109 - static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2) 110 - { 111 - return cmp1.tv64 == cmp2.tv64; 112 - } 115 + #define ktime_to_ns(kt) (kt) 113 116 114 117 /** 115 118 * ktime_compare - Compares two ktime_t variables for less, greater or equal ··· 108 141 */ 109 142 static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2) 110 143 { 111 - if (cmp1.tv64 < cmp2.tv64) 144 + if (cmp1 < cmp2) 112 145 return -1; 113 - if (cmp1.tv64 > cmp2.tv64) 146 + if (cmp1 > cmp2) 114 147 return 1; 115 148 return 0; 116 149 } ··· 149 182 */ 150 183 BUG_ON(div < 0); 151 184 if (__builtin_constant_p(div) && !(div >> 32)) { 152 - s64 ns = kt.tv64; 185 + s64 ns = kt; 153 186 u64 tmp = ns < 0 ? -ns : ns; 154 187 155 188 do_div(tmp, div); ··· 166 199 * so catch them on 64bit as well. 167 200 */ 168 201 WARN_ON(div < 0); 169 - return kt.tv64 / div; 202 + return kt / div; 170 203 } 171 204 #endif 172 205 ··· 223 256 static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, 224 257 struct timespec *ts) 225 258 { 226 - if (kt.tv64) { 259 + if (kt) { 227 260 *ts = ktime_to_timespec(kt); 228 261 return true; 229 262 } else { ··· 242 275 static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, 243 276 struct timespec64 *ts) 244 277 { 245 - if (kt.tv64) { 278 + if (kt) { 246 279 *ts = ktime_to_timespec64(kt); 247 280 return true; 248 281 } else { ··· 257 290 * this resolution values. 258 291 */ 259 292 #define LOW_RES_NSEC TICK_NSEC 260 - #define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } 293 + #define KTIME_LOW_RES (LOW_RES_NSEC) 261 294 262 295 static inline ktime_t ns_to_ktime(u64 ns) 263 296 { 264 - static const ktime_t ktime_zero = { .tv64 = 0 }; 265 - 266 - return ktime_add_ns(ktime_zero, ns); 297 + return ns; 267 298 } 268 299 269 300 static inline ktime_t ms_to_ktime(u64 ms) 270 301 { 271 - static const ktime_t ktime_zero = { .tv64 = 0 }; 272 - 273 - return ktime_add_ms(ktime_zero, ms); 302 + return ms * NSEC_PER_MSEC; 274 303 } 275 304 276 305 # include <linux/timekeeping.h>
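This is the core of the ktime cleanup: the one-member union becomes a bare s64 typedef, so constructing, adding, subtracting and comparing ktime_t values are ordinary integer operations, and the .tv64 accesses removed throughout the rest of this diff are no longer needed. An illustrative sketch (made-up values, not code from this commit):

/* What ktime_t handling looks like once the type is a plain s64. */
#include <linux/kernel.h>
#include <linux/ktime.h>

static void ktime_scalar_demo(void)
{
	ktime_t a = ktime_set(1, 500 * NSEC_PER_MSEC);	/* 1.5 s */
	ktime_t b = ms_to_ktime(200);			/* 200 ms */
	ktime_t delta = ktime_sub(a, b);		/* plain subtraction */

	/* no .tv64 member any more - test and compare directly */
	if (delta > 0 && delta != KTIME_MAX)
		pr_info("delta = %lld ns\n", ktime_to_ns(delta));
}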
+1 -1
include/linux/mlx4/device.h
··· 1460 1460 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, 1461 1461 u32 max_range_qpn); 1462 1462 1463 - cycle_t mlx4_read_clock(struct mlx4_dev *dev); 1463 + u64 mlx4_read_clock(struct mlx4_dev *dev); 1464 1464 1465 1465 struct mlx4_active_ports { 1466 1466 DECLARE_BITMAP(ports, MLX4_MAX_PORTS);
+1 -1
include/linux/skbuff.h
··· 3227 3227 3228 3228 static inline ktime_t net_invalid_timestamp(void) 3229 3229 { 3230 - return ktime_set(0, 0); 3230 + return 0; 3231 3231 } 3232 3232 3233 3233 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
+1 -3
include/linux/tick.h
··· 127 127 128 128 static inline ktime_t tick_nohz_get_sleep_length(void) 129 129 { 130 - ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; 131 - 132 - return len; 130 + return NSEC_PER_SEC / HZ; 133 131 } 134 132 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } 135 133 static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
+6 -6
include/linux/timecounter.h
··· 20 20 #include <linux/types.h> 21 21 22 22 /* simplify initialization of mask field */ 23 - #define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) 23 + #define CYCLECOUNTER_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) 24 24 25 25 /** 26 26 * struct cyclecounter - hardware abstraction for a free running counter ··· 37 37 * @shift: cycle to nanosecond divisor (power of two) 38 38 */ 39 39 struct cyclecounter { 40 - cycle_t (*read)(const struct cyclecounter *cc); 41 - cycle_t mask; 40 + u64 (*read)(const struct cyclecounter *cc); 41 + u64 mask; 42 42 u32 mult; 43 43 u32 shift; 44 44 }; ··· 63 63 */ 64 64 struct timecounter { 65 65 const struct cyclecounter *cc; 66 - cycle_t cycle_last; 66 + u64 cycle_last; 67 67 u64 nsec; 68 68 u64 mask; 69 69 u64 frac; ··· 77 77 * @frac: pointer to storage for the fractional nanoseconds. 78 78 */ 79 79 static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, 80 - cycle_t cycles, u64 mask, u64 *frac) 80 + u64 cycles, u64 mask, u64 *frac) 81 81 { 82 82 u64 ns = (u64) cycles; 83 83 ··· 134 134 * in the past. 135 135 */ 136 136 extern u64 timecounter_cyc2time(struct timecounter *tc, 137 - cycle_t cycle_tstamp); 137 + u64 cycle_tstamp); 138 138 139 139 #endif
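The cyclecounter/timecounter abstraction used by the PTP-capable NIC drivers earlier in this diff gets the same treatment: its read() hook now returns u64. A hedged sketch of wiring one up follows; the device, mult/shift values and counter variable are hypothetical:

/* Hypothetical cyclecounter with a u64 read() hook, fed into a timecounter. */
#include <linux/ktime.h>
#include <linux/timecounter.h>

static u64 example_hw_cycles;	/* stand-in for a real hardware counter */

static u64 example_cc_read(const struct cyclecounter *cc)
{
	return example_hw_cycles;
}

static const struct cyclecounter example_cc = {
	.read	= example_cc_read,
	.mask	= CYCLECOUNTER_MASK(64),
	.mult	= 1 << 10,	/* placeholder scaling: 1 cycle = 1 ns */
	.shift	= 10,
};

static struct timecounter example_tc;

static void example_tc_setup(void)
{
	/* start time in nanoseconds; wall-clock time is one reasonable choice */
	timecounter_init(&example_tc, &example_cc, ktime_get_real_ns());
}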
+5 -5
include/linux/timekeeper_internal.h
··· 29 29 */ 30 30 struct tk_read_base { 31 31 struct clocksource *clock; 32 - cycle_t (*read)(struct clocksource *cs); 33 - cycle_t mask; 34 - cycle_t cycle_last; 32 + u64 (*read)(struct clocksource *cs); 33 + u64 mask; 34 + u64 cycle_last; 35 35 u32 mult; 36 36 u32 shift; 37 37 u64 xtime_nsec; ··· 97 97 struct timespec64 raw_time; 98 98 99 99 /* The following members are for timekeeping internal use */ 100 - cycle_t cycle_interval; 100 + u64 cycle_interval; 101 101 u64 xtime_interval; 102 102 s64 xtime_remainder; 103 103 u32 raw_interval; ··· 136 136 137 137 extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm, 138 138 struct clocksource *c, u32 mult, 139 - cycle_t cycle_last); 139 + u64 cycle_last); 140 140 extern void update_vsyscall_tz(void); 141 141 142 142 #else
+2 -2
include/linux/timekeeping.h
··· 293 293 * @cs_was_changed_seq: The sequence number of clocksource change events 294 294 */ 295 295 struct system_time_snapshot { 296 - cycle_t cycles; 296 + u64 cycles; 297 297 ktime_t real; 298 298 ktime_t raw; 299 299 unsigned int clock_was_set_seq; ··· 321 321 * timekeeping code to verify comparibility of two cycle values 322 322 */ 323 323 struct system_counterval_t { 324 - cycle_t cycles; 324 + u64 cycles; 325 325 struct clocksource *cs; 326 326 }; 327 327
-3
include/linux/types.h
··· 228 228 typedef void (*rcu_callback_t)(struct rcu_head *head); 229 229 typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); 230 230 231 - /* clocksource cycle base type */ 232 - typedef u64 cycle_t; 233 - 234 231 #endif /* __ASSEMBLY__ */ 235 232 #endif /* _LINUX_TYPES_H */
+1 -1
include/linux/wait.h
··· 510 510 hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \ 511 511 HRTIMER_MODE_REL); \ 512 512 hrtimer_init_sleeper(&__t, current); \ 513 - if ((timeout).tv64 != KTIME_MAX) \ 513 + if ((timeout) != KTIME_MAX) \ 514 514 hrtimer_start_range_ns(&__t.timer, timeout, \ 515 515 current->timer_slack_ns, \ 516 516 HRTIMER_MODE_REL); \
+2 -2
include/net/red.h
··· 207 207 208 208 static inline int red_is_idling(const struct red_vars *v) 209 209 { 210 - return v->qidlestart.tv64 != 0; 210 + return v->qidlestart != 0; 211 211 } 212 212 213 213 static inline void red_start_of_idle_period(struct red_vars *v) ··· 217 217 218 218 static inline void red_end_of_idle_period(struct red_vars *v) 219 219 { 220 - v->qidlestart.tv64 = 0; 220 + v->qidlestart = 0; 221 221 } 222 222 223 223 static inline void red_restart(struct red_vars *v)
+2 -2
include/net/sock.h
··· 2193 2193 */ 2194 2194 if (sock_flag(sk, SOCK_RCVTSTAMP) || 2195 2195 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || 2196 - (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || 2197 - (hwtstamps->hwtstamp.tv64 && 2196 + (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || 2197 + (hwtstamps->hwtstamp && 2198 2198 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) 2199 2199 __sock_recv_timestamp(msg, sk, skb); 2200 2200 else
+3 -3
include/trace/events/alarmtimer.h
··· 31 31 ), 32 32 33 33 TP_fast_assign( 34 - __entry->expires = expires.tv64; 34 + __entry->expires = expires; 35 35 __entry->alarm_type = flag; 36 36 ), 37 37 ··· 57 57 TP_fast_assign( 58 58 __entry->alarm = alarm; 59 59 __entry->alarm_type = alarm->type; 60 - __entry->expires = alarm->node.expires.tv64; 61 - __entry->now = now.tv64; 60 + __entry->expires = alarm->node.expires; 61 + __entry->now = now; 62 62 ), 63 63 64 64 TP_printk("alarmtimer:%p type:%s expires:%llu now:%llu",
+7 -9
include/trace/events/timer.h
··· 177 177 TP_fast_assign( 178 178 __entry->hrtimer = hrtimer; 179 179 __entry->function = hrtimer->function; 180 - __entry->expires = hrtimer_get_expires(hrtimer).tv64; 181 - __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64; 180 + __entry->expires = hrtimer_get_expires(hrtimer); 181 + __entry->softexpires = hrtimer_get_softexpires(hrtimer); 182 182 ), 183 183 184 184 TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu", 185 185 __entry->hrtimer, __entry->function, 186 - (unsigned long long)ktime_to_ns((ktime_t) { 187 - .tv64 = __entry->expires }), 188 - (unsigned long long)ktime_to_ns((ktime_t) { 189 - .tv64 = __entry->softexpires })) 186 + (unsigned long long) __entry->expires, 187 + (unsigned long long) __entry->softexpires) 190 188 ); 191 189 192 190 /** ··· 209 211 210 212 TP_fast_assign( 211 213 __entry->hrtimer = hrtimer; 212 - __entry->now = now->tv64; 214 + __entry->now = *now; 213 215 __entry->function = hrtimer->function; 214 216 ), 215 217 216 218 TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function, 217 - (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) 218 - ); 219 + (unsigned long long) __entry->now) 220 + ); 219 221 220 222 DECLARE_EVENT_CLASS(hrtimer_class, 221 223
+2 -2
kernel/futex.c
··· 2459 2459 restart->fn = futex_wait_restart; 2460 2460 restart->futex.uaddr = uaddr; 2461 2461 restart->futex.val = val; 2462 - restart->futex.time = abs_time->tv64; 2462 + restart->futex.time = *abs_time; 2463 2463 restart->futex.bitset = bitset; 2464 2464 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; 2465 2465 ··· 2480 2480 ktime_t t, *tp = NULL; 2481 2481 2482 2482 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { 2483 - t.tv64 = restart->futex.time; 2483 + t = restart->futex.time; 2484 2484 tp = &t; 2485 2485 } 2486 2486 restart->fn = do_no_restart_syscall;
+1 -1
kernel/sched/core.c
··· 1456 1456 * yield - it could be a while. 1457 1457 */ 1458 1458 if (unlikely(queued)) { 1459 - ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ); 1459 + ktime_t to = NSEC_PER_SEC / HZ; 1460 1460 1461 1461 set_current_state(TASK_UNINTERRUPTIBLE); 1462 1462 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
+3 -3
kernel/signal.c
··· 587 587 struct hrtimer *tmr = &tsk->signal->real_timer; 588 588 589 589 if (!hrtimer_is_queued(tmr) && 590 - tsk->signal->it_real_incr.tv64 != 0) { 590 + tsk->signal->it_real_incr != 0) { 591 591 hrtimer_forward(tmr, tmr->base->get_time(), 592 592 tsk->signal->it_real_incr); 593 593 hrtimer_restart(tmr); ··· 2766 2766 int do_sigtimedwait(const sigset_t *which, siginfo_t *info, 2767 2767 const struct timespec *ts) 2768 2768 { 2769 - ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX }; 2769 + ktime_t *to = NULL, timeout = KTIME_MAX; 2770 2770 struct task_struct *tsk = current; 2771 2771 sigset_t mask = *which; 2772 2772 int sig, ret = 0; ··· 2786 2786 2787 2787 spin_lock_irq(&tsk->sighand->siglock); 2788 2788 sig = dequeue_signal(tsk, &mask, info); 2789 - if (!sig && timeout.tv64) { 2789 + if (!sig && timeout) { 2790 2790 /* 2791 2791 * None ready, temporarily unblock those we're interested 2792 2792 * while we are sleeping in so that we'll be awakened when
+12 -12
kernel/time/alarmtimer.c
··· 234 234 min = freezer_delta; 235 235 expires = freezer_expires; 236 236 type = freezer_alarmtype; 237 - freezer_delta = ktime_set(0, 0); 237 + freezer_delta = 0; 238 238 spin_unlock_irqrestore(&freezer_delta_lock, flags); 239 239 240 240 rtc = alarmtimer_get_rtcdev(); ··· 254 254 if (!next) 255 255 continue; 256 256 delta = ktime_sub(next->expires, base->gettime()); 257 - if (!min.tv64 || (delta.tv64 < min.tv64)) { 257 + if (!min || (delta < min)) { 258 258 expires = next->expires; 259 259 min = delta; 260 260 type = i; 261 261 } 262 262 } 263 - if (min.tv64 == 0) 263 + if (min == 0) 264 264 return 0; 265 265 266 266 if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) { ··· 277 277 now = ktime_add(now, min); 278 278 279 279 /* Set alarm, if in the past reject suspend briefly to handle */ 280 - ret = rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0)); 280 + ret = rtc_timer_start(rtc, &rtctimer, now, 0); 281 281 if (ret < 0) 282 282 __pm_wakeup_event(ws, MSEC_PER_SEC); 283 283 return ret; ··· 328 328 delta = ktime_sub(absexp, base->gettime()); 329 329 330 330 spin_lock_irqsave(&freezer_delta_lock, flags); 331 - if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64)) { 331 + if (!freezer_delta || (delta < freezer_delta)) { 332 332 freezer_delta = delta; 333 333 freezer_expires = absexp; 334 334 freezer_alarmtype = type; ··· 453 453 454 454 delta = ktime_sub(now, alarm->node.expires); 455 455 456 - if (delta.tv64 < 0) 456 + if (delta < 0) 457 457 return 0; 458 458 459 - if (unlikely(delta.tv64 >= interval.tv64)) { 459 + if (unlikely(delta >= interval)) { 460 460 s64 incr = ktime_to_ns(interval); 461 461 462 462 overrun = ktime_divns(delta, incr); ··· 464 464 alarm->node.expires = ktime_add_ns(alarm->node.expires, 465 465 incr*overrun); 466 466 467 - if (alarm->node.expires.tv64 > now.tv64) 467 + if (alarm->node.expires > now) 468 468 return overrun; 469 469 /* 470 470 * This (and the ktime_add() below) is the ··· 522 522 } 523 523 524 524 /* Re-add periodic timers */ 525 - if (ptr->it.alarm.interval.tv64) { 525 + if (ptr->it.alarm.interval) { 526 526 ptr->it_overrun += alarm_forward(alarm, now, 527 527 ptr->it.alarm.interval); 528 528 result = ALARMTIMER_RESTART; ··· 730 730 731 731 rem = ktime_sub(exp, alarm_bases[type].gettime()); 732 732 733 - if (rem.tv64 <= 0) 733 + if (rem <= 0) 734 734 return 0; 735 735 rmt = ktime_to_timespec(rem); 736 736 ··· 755 755 struct alarm alarm; 756 756 int ret = 0; 757 757 758 - exp.tv64 = restart->nanosleep.expires; 758 + exp = restart->nanosleep.expires; 759 759 alarm_init(&alarm, type, alarmtimer_nsleep_wakeup); 760 760 761 761 if (alarmtimer_do_nsleep(&alarm, exp)) ··· 835 835 restart = &current->restart_block; 836 836 restart->fn = alarm_timer_nsleep_restart; 837 837 restart->nanosleep.clockid = type; 838 - restart->nanosleep.expires = exp.tv64; 838 + restart->nanosleep.expires = exp; 839 839 restart->nanosleep.rmtp = rmtp; 840 840 ret = -ERESTART_RESTARTBLOCK; 841 841
+3 -3
kernel/time/clockevents.c
··· 179 179 void clockevents_shutdown(struct clock_event_device *dev) 180 180 { 181 181 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); 182 - dev->next_event.tv64 = KTIME_MAX; 182 + dev->next_event = KTIME_MAX; 183 183 } 184 184 185 185 /** ··· 213 213 if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { 214 214 printk_deferred(KERN_WARNING 215 215 "CE: Reprogramming failure. Giving up\n"); 216 - dev->next_event.tv64 = KTIME_MAX; 216 + dev->next_event = KTIME_MAX; 217 217 return -ETIME; 218 218 } 219 219 ··· 310 310 int64_t delta; 311 311 int rc; 312 312 313 - if (unlikely(expires.tv64 < 0)) { 313 + if (unlikely(expires < 0)) { 314 314 WARN_ON_ONCE(1); 315 315 return -ETIME; 316 316 }
+1 -1
kernel/time/clocksource.c
··· 170 170 static void clocksource_watchdog(unsigned long data) 171 171 { 172 172 struct clocksource *cs; 173 - cycle_t csnow, wdnow, cslast, wdlast, delta; 173 + u64 csnow, wdnow, cslast, wdlast, delta; 174 174 int64_t wd_nsec, cs_nsec; 175 175 int next_cpu, reset_pending; 176 176
+27 -27
kernel/time/hrtimer.c
··· 171 171 return 0; 172 172 173 173 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); 174 - return expires.tv64 <= new_base->cpu_base->expires_next.tv64; 174 + return expires <= new_base->cpu_base->expires_next; 175 175 #else 176 176 return 0; 177 177 #endif ··· 313 313 * We use KTIME_SEC_MAX here, the maximum timeout which we can 314 314 * return to user space in a timespec: 315 315 */ 316 - if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64) 316 + if (res < 0 || res < lhs || res < rhs) 317 317 res = ktime_set(KTIME_SEC_MAX, 0); 318 318 319 319 return res; ··· 465 465 static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base) 466 466 { 467 467 struct hrtimer_clock_base *base = cpu_base->clock_base; 468 - ktime_t expires, expires_next = { .tv64 = KTIME_MAX }; 469 468 unsigned int active = cpu_base->active_bases; 469 + ktime_t expires, expires_next = KTIME_MAX; 470 470 471 471 hrtimer_update_next_timer(cpu_base, NULL); 472 472 for (; active; base++, active >>= 1) { ··· 479 479 next = timerqueue_getnext(&base->active); 480 480 timer = container_of(next, struct hrtimer, node); 481 481 expires = ktime_sub(hrtimer_get_expires(timer), base->offset); 482 - if (expires.tv64 < expires_next.tv64) { 482 + if (expires < expires_next) { 483 483 expires_next = expires; 484 484 hrtimer_update_next_timer(cpu_base, timer); 485 485 } ··· 489 489 * the clock bases so the result might be negative. Fix it up 490 490 * to prevent a false positive in clockevents_program_event(). 491 491 */ 492 - if (expires_next.tv64 < 0) 493 - expires_next.tv64 = 0; 492 + if (expires_next < 0) 493 + expires_next = 0; 494 494 return expires_next; 495 495 } 496 496 #endif ··· 561 561 562 562 expires_next = __hrtimer_get_next_event(cpu_base); 563 563 564 - if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64) 564 + if (skip_equal && expires_next == cpu_base->expires_next) 565 565 return; 566 566 567 - cpu_base->expires_next.tv64 = expires_next.tv64; 567 + cpu_base->expires_next = expires_next; 568 568 569 569 /* 570 570 * If a hang was detected in the last timer interrupt then we ··· 622 622 * CLOCK_REALTIME timer might be requested with an absolute 623 623 * expiry time which is less than base->offset. Set it to 0. 
624 624 */ 625 - if (expires.tv64 < 0) 626 - expires.tv64 = 0; 625 + if (expires < 0) 626 + expires = 0; 627 627 628 - if (expires.tv64 >= cpu_base->expires_next.tv64) 628 + if (expires >= cpu_base->expires_next) 629 629 return; 630 630 631 631 /* Update the pointer to the next expiring timer */ ··· 653 653 */ 654 654 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) 655 655 { 656 - base->expires_next.tv64 = KTIME_MAX; 656 + base->expires_next = KTIME_MAX; 657 657 base->hres_active = 0; 658 658 } 659 659 ··· 827 827 828 828 delta = ktime_sub(now, hrtimer_get_expires(timer)); 829 829 830 - if (delta.tv64 < 0) 830 + if (delta < 0) 831 831 return 0; 832 832 833 833 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED)) 834 834 return 0; 835 835 836 - if (interval.tv64 < hrtimer_resolution) 837 - interval.tv64 = hrtimer_resolution; 836 + if (interval < hrtimer_resolution) 837 + interval = hrtimer_resolution; 838 838 839 - if (unlikely(delta.tv64 >= interval.tv64)) { 839 + if (unlikely(delta >= interval)) { 840 840 s64 incr = ktime_to_ns(interval); 841 841 842 842 orun = ktime_divns(delta, incr); 843 843 hrtimer_add_expires_ns(timer, incr * orun); 844 - if (hrtimer_get_expires_tv64(timer) > now.tv64) 844 + if (hrtimer_get_expires_tv64(timer) > now) 845 845 return orun; 846 846 /* 847 847 * This (and the ktime_add() below) is the ··· 955 955 */ 956 956 timer->is_rel = mode & HRTIMER_MODE_REL; 957 957 if (timer->is_rel) 958 - tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution)); 958 + tim = ktime_add_safe(tim, hrtimer_resolution); 959 959 #endif 960 960 return tim; 961 961 } ··· 1104 1104 raw_spin_lock_irqsave(&cpu_base->lock, flags); 1105 1105 1106 1106 if (!__hrtimer_hres_active(cpu_base)) 1107 - expires = __hrtimer_get_next_event(cpu_base).tv64; 1107 + expires = __hrtimer_get_next_event(cpu_base); 1108 1108 1109 1109 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 1110 1110 ··· 1296 1296 * are right-of a not yet expired timer, because that 1297 1297 * timer will have to trigger a wakeup anyway. 1298 1298 */ 1299 - if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) 1299 + if (basenow < hrtimer_get_softexpires_tv64(timer)) 1300 1300 break; 1301 1301 1302 1302 __run_hrtimer(cpu_base, base, timer, &basenow); ··· 1318 1318 1319 1319 BUG_ON(!cpu_base->hres_active); 1320 1320 cpu_base->nr_events++; 1321 - dev->next_event.tv64 = KTIME_MAX; 1321 + dev->next_event = KTIME_MAX; 1322 1322 1323 1323 raw_spin_lock(&cpu_base->lock); 1324 1324 entry_time = now = hrtimer_update_base(cpu_base); ··· 1331 1331 * timers which run their callback and need to be requeued on 1332 1332 * this CPU. 1333 1333 */ 1334 - cpu_base->expires_next.tv64 = KTIME_MAX; 1334 + cpu_base->expires_next = KTIME_MAX; 1335 1335 1336 1336 __hrtimer_run_queues(cpu_base, now); 1337 1337 ··· 1379 1379 cpu_base->hang_detected = 1; 1380 1380 raw_spin_unlock(&cpu_base->lock); 1381 1381 delta = ktime_sub(now, entry_time); 1382 - if ((unsigned int)delta.tv64 > cpu_base->max_hang_time) 1383 - cpu_base->max_hang_time = (unsigned int) delta.tv64; 1382 + if ((unsigned int)delta > cpu_base->max_hang_time) 1383 + cpu_base->max_hang_time = (unsigned int) delta; 1384 1384 /* 1385 1385 * Limit it to a sensible value as we enforce a longer 1386 1386 * delay. Give the CPU at least 100ms to catch up. 
1387 1387 */ 1388 - if (delta.tv64 > 100 * NSEC_PER_MSEC) 1388 + if (delta > 100 * NSEC_PER_MSEC) 1389 1389 expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); 1390 1390 else 1391 1391 expires_next = ktime_add(now, delta); ··· 1495 1495 ktime_t rem; 1496 1496 1497 1497 rem = hrtimer_expires_remaining(timer); 1498 - if (rem.tv64 <= 0) 1498 + if (rem <= 0) 1499 1499 return 0; 1500 1500 rmt = ktime_to_timespec(rem); 1501 1501 ··· 1693 1693 * Optimize when a zero timeout value is given. It does not 1694 1694 * matter whether this is an absolute or a relative time. 1695 1695 */ 1696 - if (expires && !expires->tv64) { 1696 + if (expires && *expires == 0) { 1697 1697 __set_current_state(TASK_RUNNING); 1698 1698 return 0; 1699 1699 }
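The hrtimer hunks above all come down to one mechanical substitution: ktime_t is now a bare s64 nanosecond count, so every .tv64 access and compound-literal initializer turns into ordinary scalar assignment and comparison. A minimal sketch of the type change driving it, abbreviated from include/linux/ktime.h (surrounding helpers omitted):

/* before: a one-member union whose only job was to carry the scalar */
union ktime {
	s64	tv64;
};
typedef union ktime ktime_t;

/* after: the scalar itself, so "expires < expires_next" needs no .tv64 */
typedef s64 ktime_t;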
+5 -5
kernel/time/itimer.c
··· 34 34 * then we return 0 - which is correct. 35 35 */ 36 36 if (hrtimer_active(timer)) { 37 - if (rem.tv64 <= 0) 38 - rem.tv64 = NSEC_PER_USEC; 37 + if (rem <= 0) 38 + rem = NSEC_PER_USEC; 39 39 } else 40 - rem.tv64 = 0; 40 + rem = 0; 41 41 42 42 return ktime_to_timeval(rem); 43 43 } ··· 216 216 goto again; 217 217 } 218 218 expires = timeval_to_ktime(value->it_value); 219 - if (expires.tv64 != 0) { 219 + if (expires != 0) { 220 220 tsk->signal->it_real_incr = 221 221 timeval_to_ktime(value->it_interval); 222 222 hrtimer_start(timer, expires, HRTIMER_MODE_REL); 223 223 } else 224 - tsk->signal->it_real_incr.tv64 = 0; 224 + tsk->signal->it_real_incr = 0; 225 225 226 226 trace_itimer_state(ITIMER_REAL, value, 0); 227 227 spin_unlock_irq(&tsk->sighand->siglock);
+2 -2
kernel/time/jiffies.c
··· 59 59 #define JIFFIES_SHIFT 8 60 60 #endif 61 61 62 - static cycle_t jiffies_read(struct clocksource *cs) 62 + static u64 jiffies_read(struct clocksource *cs) 63 63 { 64 - return (cycle_t) jiffies; 64 + return (u64) jiffies; 65 65 } 66 66 67 67 static struct clocksource clocksource_jiffies = {
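The clocksource conversions, of which jiffies_read() above is the simplest case, are equally mechanical: cycle_t carried no information beyond u64, so spelling the alias out changes nothing in behaviour. Roughly what gets retired, as it stood in include/linux/types.h and include/linux/clocksource.h (quoted from memory, for orientation only):

/* the bare alias being removed */
typedef u64 cycle_t;

/* the clocksource read hook, before and after the rename */
	cycle_t (*read)(struct clocksource *cs);	/* old */
	u64 (*read)(struct clocksource *cs);		/* new */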
+1 -1
kernel/time/ntp.c
··· 381 381 382 382 if ((time_state == TIME_INS) && (time_status & STA_INS)) 383 383 return ktime_set(ntp_next_leap_sec, 0); 384 - ret.tv64 = KTIME_MAX; 384 + ret = KTIME_MAX; 385 385 return ret; 386 386 } 387 387
+11 -11
kernel/time/posix-timers.c
··· 359 359 { 360 360 struct hrtimer *timer = &timr->it.real.timer; 361 361 362 - if (timr->it.real.interval.tv64 == 0) 362 + if (timr->it.real.interval == 0) 363 363 return; 364 364 365 365 timr->it_overrun += (unsigned int) hrtimer_forward(timer, ··· 449 449 timr = container_of(timer, struct k_itimer, it.real.timer); 450 450 spin_lock_irqsave(&timr->it_lock, flags); 451 451 452 - if (timr->it.real.interval.tv64 != 0) 452 + if (timr->it.real.interval != 0) 453 453 si_private = ++timr->it_requeue_pending; 454 454 455 455 if (posix_timer_event(timr, si_private)) { ··· 458 458 * we will not get a call back to restart it AND 459 459 * it should be restarted. 460 460 */ 461 - if (timr->it.real.interval.tv64 != 0) { 461 + if (timr->it.real.interval != 0) { 462 462 ktime_t now = hrtimer_cb_get_time(timer); 463 463 464 464 /* ··· 485 485 */ 486 486 #ifdef CONFIG_HIGH_RES_TIMERS 487 487 { 488 - ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ); 488 + ktime_t kj = NSEC_PER_SEC / HZ; 489 489 490 - if (timr->it.real.interval.tv64 < kj.tv64) 490 + if (timr->it.real.interval < kj) 491 491 now = ktime_add(now, kj); 492 492 } 493 493 #endif ··· 743 743 iv = timr->it.real.interval; 744 744 745 745 /* interval timer ? */ 746 - if (iv.tv64) 746 + if (iv) 747 747 cur_setting->it_interval = ktime_to_timespec(iv); 748 748 else if (!hrtimer_active(timer) && 749 749 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) ··· 756 756 * timer move the expiry time forward by intervals, so 757 757 * expiry is > now. 758 758 */ 759 - if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || 760 - (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) 759 + if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || 760 + (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) 761 761 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); 762 762 763 763 remaining = __hrtimer_expires_remaining_adjusted(timer, now); 764 764 /* Return 0 only, when the timer is expired and not pending */ 765 - if (remaining.tv64 <= 0) { 765 + if (remaining <= 0) { 766 766 /* 767 767 * A single shot SIGEV_NONE timer must return 0, when 768 768 * it is expired ! ··· 839 839 common_timer_get(timr, old_setting); 840 840 841 841 /* disable the timer */ 842 - timr->it.real.interval.tv64 = 0; 842 + timr->it.real.interval = 0; 843 843 /* 844 844 * careful here. If smp we could be in the "fire" routine which will 845 845 * be spinning as we hold the lock. But this is ONLY an SMP issue. ··· 924 924 925 925 static int common_timer_del(struct k_itimer *timer) 926 926 { 927 - timer->it.real.interval.tv64 = 0; 927 + timer->it.real.interval = 0; 928 928 929 929 if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0) 930 930 return TIMER_RETRY;
+1 -1
kernel/time/tick-broadcast-hrtimer.c
··· 97 97 ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer); 98 98 99 99 if (clockevent_state_oneshot(&ce_broadcast_hrtimer)) 100 - if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX) 100 + if (ce_broadcast_hrtimer.next_event != KTIME_MAX) 101 101 return HRTIMER_RESTART; 102 102 103 103 return HRTIMER_NORESTART;
+12 -12
kernel/time/tick-broadcast.c
··· 604 604 bool bc_local; 605 605 606 606 raw_spin_lock(&tick_broadcast_lock); 607 - dev->next_event.tv64 = KTIME_MAX; 608 - next_event.tv64 = KTIME_MAX; 607 + dev->next_event = KTIME_MAX; 608 + next_event = KTIME_MAX; 609 609 cpumask_clear(tmpmask); 610 610 now = ktime_get(); 611 611 /* Find all expired events */ 612 612 for_each_cpu(cpu, tick_broadcast_oneshot_mask) { 613 613 td = &per_cpu(tick_cpu_device, cpu); 614 - if (td->evtdev->next_event.tv64 <= now.tv64) { 614 + if (td->evtdev->next_event <= now) { 615 615 cpumask_set_cpu(cpu, tmpmask); 616 616 /* 617 617 * Mark the remote cpu in the pending mask, so ··· 619 619 * timer in tick_broadcast_oneshot_control(). 620 620 */ 621 621 cpumask_set_cpu(cpu, tick_broadcast_pending_mask); 622 - } else if (td->evtdev->next_event.tv64 < next_event.tv64) { 623 - next_event.tv64 = td->evtdev->next_event.tv64; 622 + } else if (td->evtdev->next_event < next_event) { 623 + next_event = td->evtdev->next_event; 624 624 next_cpu = cpu; 625 625 } 626 626 } ··· 657 657 * - There are pending events on sleeping CPUs which were not 658 658 * in the event mask 659 659 */ 660 - if (next_event.tv64 != KTIME_MAX) 660 + if (next_event != KTIME_MAX) 661 661 tick_broadcast_set_event(dev, next_cpu, next_event); 662 662 663 663 raw_spin_unlock(&tick_broadcast_lock); ··· 672 672 { 673 673 if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER)) 674 674 return 0; 675 - if (bc->next_event.tv64 == KTIME_MAX) 675 + if (bc->next_event == KTIME_MAX) 676 676 return 0; 677 677 return bc->bound_on == cpu ? -EBUSY : 0; 678 678 } ··· 688 688 if (bc->features & CLOCK_EVT_FEAT_HRTIMER) { 689 689 if (broadcast_needs_cpu(bc, smp_processor_id())) 690 690 return; 691 - if (dev->next_event.tv64 < bc->next_event.tv64) 691 + if (dev->next_event < bc->next_event) 692 692 return; 693 693 } 694 694 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); ··· 754 754 */ 755 755 if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) { 756 756 ret = -EBUSY; 757 - } else if (dev->next_event.tv64 < bc->next_event.tv64) { 757 + } else if (dev->next_event < bc->next_event) { 758 758 tick_broadcast_set_event(bc, cpu, dev->next_event); 759 759 /* 760 760 * In case of hrtimer broadcasts the ··· 789 789 /* 790 790 * Bail out if there is no next event. 791 791 */ 792 - if (dev->next_event.tv64 == KTIME_MAX) 792 + if (dev->next_event == KTIME_MAX) 793 793 goto out; 794 794 /* 795 795 * If the pending bit is not set, then we are ··· 824 824 * nohz fixups. 825 825 */ 826 826 now = ktime_get(); 827 - if (dev->next_event.tv64 <= now.tv64) { 827 + if (dev->next_event <= now) { 828 828 cpumask_set_cpu(cpu, tick_broadcast_force_mask); 829 829 goto out; 830 830 } ··· 897 897 tick_next_period); 898 898 tick_broadcast_set_event(bc, cpu, tick_next_period); 899 899 } else 900 - bc->next_event.tv64 = KTIME_MAX; 900 + bc->next_event = KTIME_MAX; 901 901 } else { 902 902 /* 903 903 * The first cpu which switches to oneshot mode sets
+2 -2
kernel/time/tick-common.c
··· 178 178 struct clock_event_device *newdev, int cpu, 179 179 const struct cpumask *cpumask) 180 180 { 181 - ktime_t next_event; 182 181 void (*handler)(struct clock_event_device *) = NULL; 182 + ktime_t next_event = 0; 183 183 184 184 /* 185 185 * First device setup ? ··· 195 195 else 196 196 tick_do_timer_cpu = TICK_DO_TIMER_NONE; 197 197 tick_next_period = ktime_get(); 198 - tick_period = ktime_set(0, NSEC_PER_SEC / HZ); 198 + tick_period = NSEC_PER_SEC / HZ; 199 199 } 200 200 201 201 /*
+1 -1
kernel/time/tick-oneshot.c
··· 28 28 { 29 29 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); 30 30 31 - if (unlikely(expires.tv64 == KTIME_MAX)) { 31 + if (unlikely(expires == KTIME_MAX)) { 32 32 /* 33 33 * We don't need the clock event device any more, stop it. 34 34 */
+11 -11
kernel/time/tick-sched.c
··· 58 58 * Do a quick check without holding jiffies_lock: 59 59 */ 60 60 delta = ktime_sub(now, last_jiffies_update); 61 - if (delta.tv64 < tick_period.tv64) 61 + if (delta < tick_period) 62 62 return; 63 63 64 64 /* Reevaluate with jiffies_lock held */ 65 65 write_seqlock(&jiffies_lock); 66 66 67 67 delta = ktime_sub(now, last_jiffies_update); 68 - if (delta.tv64 >= tick_period.tv64) { 68 + if (delta >= tick_period) { 69 69 70 70 delta = ktime_sub(delta, tick_period); 71 71 last_jiffies_update = ktime_add(last_jiffies_update, 72 72 tick_period); 73 73 74 74 /* Slow path for long timeouts */ 75 - if (unlikely(delta.tv64 >= tick_period.tv64)) { 75 + if (unlikely(delta >= tick_period)) { 76 76 s64 incr = ktime_to_ns(tick_period); 77 77 78 78 ticks = ktime_divns(delta, incr); ··· 101 101 102 102 write_seqlock(&jiffies_lock); 103 103 /* Did we start the jiffies update yet ? */ 104 - if (last_jiffies_update.tv64 == 0) 104 + if (last_jiffies_update == 0) 105 105 last_jiffies_update = tick_next_period; 106 106 period = last_jiffies_update; 107 107 write_sequnlock(&jiffies_lock); ··· 669 669 /* Read jiffies and the time when jiffies were updated last */ 670 670 do { 671 671 seq = read_seqbegin(&jiffies_lock); 672 - basemono = last_jiffies_update.tv64; 672 + basemono = last_jiffies_update; 673 673 basejiff = jiffies; 674 674 } while (read_seqretry(&jiffies_lock, seq)); 675 675 ts->last_jiffies = basejiff; ··· 697 697 */ 698 698 delta = next_tick - basemono; 699 699 if (delta <= (u64)TICK_NSEC) { 700 - tick.tv64 = 0; 700 + tick = 0; 701 701 702 702 /* 703 703 * Tell the timer code that the base is not idle, i.e. undo ··· 764 764 expires = KTIME_MAX; 765 765 766 766 expires = min_t(u64, expires, next_tick); 767 - tick.tv64 = expires; 767 + tick = expires; 768 768 769 769 /* Skip reprogram of event if its not changed */ 770 - if (ts->tick_stopped && (expires == dev->next_event.tv64)) 770 + if (ts->tick_stopped && (expires == dev->next_event)) 771 771 goto out; 772 772 773 773 /* ··· 864 864 } 865 865 866 866 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) { 867 - ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ }; 867 + ts->sleep_length = NSEC_PER_SEC / HZ; 868 868 return false; 869 869 } 870 870 ··· 914 914 ts->idle_calls++; 915 915 916 916 expires = tick_nohz_stop_sched_tick(ts, now, cpu); 917 - if (expires.tv64 > 0LL) { 917 + if (expires > 0LL) { 918 918 ts->idle_sleeps++; 919 919 ts->idle_expires = expires; 920 920 } ··· 1051 1051 struct pt_regs *regs = get_irq_regs(); 1052 1052 ktime_t now = ktime_get(); 1053 1053 1054 - dev->next_event.tv64 = KTIME_MAX; 1054 + dev->next_event = KTIME_MAX; 1055 1055 1056 1056 tick_sched_do_timer(now); 1057 1057 tick_sched_handle(ts, regs);
+3 -3
kernel/time/timecounter.c
··· 43 43 */ 44 44 static u64 timecounter_read_delta(struct timecounter *tc) 45 45 { 46 - cycle_t cycle_now, cycle_delta; 46 + u64 cycle_now, cycle_delta; 47 47 u64 ns_offset; 48 48 49 49 /* read cycle counter: */ ··· 80 80 * time previous to the time stored in the cycle counter. 81 81 */ 82 82 static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, 83 - cycle_t cycles, u64 mask, u64 frac) 83 + u64 cycles, u64 mask, u64 frac) 84 84 { 85 85 u64 ns = (u64) cycles; 86 86 ··· 90 90 } 91 91 92 92 u64 timecounter_cyc2time(struct timecounter *tc, 93 - cycle_t cycle_tstamp) 93 + u64 cycle_tstamp) 94 94 { 95 95 u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask; 96 96 u64 nsec = tc->nsec, frac = tc->frac;
+30 -33
kernel/time/timekeeping.c
··· 104 104 */ 105 105 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec, 106 106 -tk->wall_to_monotonic.tv_nsec); 107 - WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64); 107 + WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp)); 108 108 tk->wall_to_monotonic = wtm; 109 109 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec); 110 110 tk->offs_real = timespec64_to_ktime(tmp); ··· 119 119 #ifdef CONFIG_DEBUG_TIMEKEEPING 120 120 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ 121 121 122 - static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset) 122 + static void timekeeping_check_update(struct timekeeper *tk, u64 offset) 123 123 { 124 124 125 - cycle_t max_cycles = tk->tkr_mono.clock->max_cycles; 125 + u64 max_cycles = tk->tkr_mono.clock->max_cycles; 126 126 const char *name = tk->tkr_mono.clock->name; 127 127 128 128 if (offset > max_cycles) { ··· 158 158 } 159 159 } 160 160 161 - static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) 161 + static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) 162 162 { 163 163 struct timekeeper *tk = &tk_core.timekeeper; 164 - cycle_t now, last, mask, max, delta; 164 + u64 now, last, mask, max, delta; 165 165 unsigned int seq; 166 166 167 167 /* ··· 199 199 return delta; 200 200 } 201 201 #else 202 - static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset) 202 + static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset) 203 203 { 204 204 } 205 - static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) 205 + static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) 206 206 { 207 - cycle_t cycle_now, delta; 207 + u64 cycle_now, delta; 208 208 209 209 /* read clocksource */ 210 210 cycle_now = tkr->read(tkr->clock); ··· 229 229 */ 230 230 static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) 231 231 { 232 - cycle_t interval; 232 + u64 interval; 233 233 u64 tmp, ntpinterval; 234 234 struct clocksource *old_clock; 235 235 ··· 254 254 if (tmp == 0) 255 255 tmp = 1; 256 256 257 - interval = (cycle_t) tmp; 257 + interval = (u64) tmp; 258 258 tk->cycle_interval = interval; 259 259 260 260 /* Go back from cycles -> shifted ns */ ··· 298 298 static inline u32 arch_gettimeoffset(void) { return 0; } 299 299 #endif 300 300 301 - static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, 302 - cycle_t delta) 301 + static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta) 303 302 { 304 303 u64 nsec; 305 304 ··· 311 312 312 313 static inline u64 timekeeping_get_ns(struct tk_read_base *tkr) 313 314 { 314 - cycle_t delta; 315 + u64 delta; 315 316 316 317 delta = timekeeping_get_delta(tkr); 317 318 return timekeeping_delta_to_ns(tkr, delta); 318 319 } 319 320 320 - static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, 321 - cycle_t cycles) 321 + static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles) 322 322 { 323 - cycle_t delta; 323 + u64 delta; 324 324 325 325 /* calculate the delta since the last update_wall_time */ 326 326 delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask); ··· 452 454 EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns); 453 455 454 456 /* Suspend-time cycles value for halted fast timekeeper. 
*/ 455 - static cycle_t cycles_at_suspend; 457 + static u64 cycles_at_suspend; 456 458 457 - static cycle_t dummy_clock_read(struct clocksource *cs) 459 + static u64 dummy_clock_read(struct clocksource *cs) 458 460 { 459 461 return cycles_at_suspend; 460 462 } ··· 571 573 static inline void tk_update_leap_state(struct timekeeper *tk) 572 574 { 573 575 tk->next_leap_ktime = ntp_get_next_leap(); 574 - if (tk->next_leap_ktime.tv64 != KTIME_MAX) 576 + if (tk->next_leap_ktime != KTIME_MAX) 575 577 /* Convert to monotonic time */ 576 578 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real); 577 579 } ··· 648 650 static void timekeeping_forward_now(struct timekeeper *tk) 649 651 { 650 652 struct clocksource *clock = tk->tkr_mono.clock; 651 - cycle_t cycle_now, delta; 653 + u64 cycle_now, delta; 652 654 u64 nsec; 653 655 654 656 cycle_now = tk->tkr_mono.read(clock); ··· 921 923 ktime_t base_real; 922 924 u64 nsec_raw; 923 925 u64 nsec_real; 924 - cycle_t now; 926 + u64 now; 925 927 926 928 WARN_ON_ONCE(timekeeping_suspended); 927 929 ··· 980 982 * interval is partial_history_cycles. 981 983 */ 982 984 static int adjust_historical_crosststamp(struct system_time_snapshot *history, 983 - cycle_t partial_history_cycles, 984 - cycle_t total_history_cycles, 985 + u64 partial_history_cycles, 986 + u64 total_history_cycles, 985 987 bool discontinuity, 986 988 struct system_device_crosststamp *ts) 987 989 { ··· 1045 1047 /* 1046 1048 * cycle_between - true if test occurs chronologically between before and after 1047 1049 */ 1048 - static bool cycle_between(cycle_t before, cycle_t test, cycle_t after) 1050 + static bool cycle_between(u64 before, u64 test, u64 after) 1049 1051 { 1050 1052 if (test > before && test < after) 1051 1053 return true; ··· 1075 1077 { 1076 1078 struct system_counterval_t system_counterval; 1077 1079 struct timekeeper *tk = &tk_core.timekeeper; 1078 - cycle_t cycles, now, interval_start; 1080 + u64 cycles, now, interval_start; 1079 1081 unsigned int clock_was_set_seq = 0; 1080 1082 ktime_t base_real, base_raw; 1081 1083 u64 nsec_real, nsec_raw; ··· 1136 1138 * current interval 1137 1139 */ 1138 1140 if (do_interp) { 1139 - cycle_t partial_history_cycles, total_history_cycles; 1141 + u64 partial_history_cycles, total_history_cycles; 1140 1142 bool discontinuity; 1141 1143 1142 1144 /* ··· 1642 1644 struct clocksource *clock = tk->tkr_mono.clock; 1643 1645 unsigned long flags; 1644 1646 struct timespec64 ts_new, ts_delta; 1645 - cycle_t cycle_now; 1647 + u64 cycle_now; 1646 1648 1647 1649 sleeptime_injected = false; 1648 1650 read_persistent_clock64(&ts_new); ··· 2008 2010 * 2009 2011 * Returns the unconsumed cycles. 
2010 2012 */ 2011 - static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, 2012 - u32 shift, 2013 - unsigned int *clock_set) 2013 + static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset, 2014 + u32 shift, unsigned int *clock_set) 2014 2015 { 2015 - cycle_t interval = tk->cycle_interval << shift; 2016 + u64 interval = tk->cycle_interval << shift; 2016 2017 u64 raw_nsecs; 2017 2018 2018 2019 /* If the offset is smaller than a shifted interval, do nothing */ ··· 2052 2055 { 2053 2056 struct timekeeper *real_tk = &tk_core.timekeeper; 2054 2057 struct timekeeper *tk = &shadow_timekeeper; 2055 - cycle_t offset; 2058 + u64 offset; 2056 2059 int shift = 0, maxshift; 2057 2060 unsigned int clock_set = 0; 2058 2061 unsigned long flags; ··· 2250 2253 } 2251 2254 2252 2255 /* Handle leapsecond insertion adjustments */ 2253 - if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64)) 2256 + if (unlikely(base >= tk->next_leap_ktime)) 2254 2257 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0)); 2255 2258 2256 2259 } while (read_seqcount_retry(&tk_core.seq, seq));
+3 -3
kernel/time/timekeeping_internal.h
··· 13 13 #endif 14 14 15 15 #ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE 16 - static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) 16 + static inline u64 clocksource_delta(u64 now, u64 last, u64 mask) 17 17 { 18 - cycle_t ret = (now - last) & mask; 18 + u64 ret = (now - last) & mask; 19 19 20 20 /* 21 21 * Prevent time going backwards by checking the MSB of mask in ··· 24 24 return ret & ~(mask >> 1) ? 0 : ret; 25 25 } 26 26 #else 27 - static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) 27 + static inline u64 clocksource_delta(u64 now, u64 last, u64 mask) 28 28 { 29 29 return (now - last) & mask; 30 30 }
+2 -2
kernel/trace/ftrace.c
··· 2847 2847 } 2848 2848 } 2849 2849 2850 - static cycle_t ftrace_update_time; 2850 + static u64 ftrace_update_time; 2851 2851 unsigned long ftrace_update_tot_cnt; 2852 2852 2853 2853 static inline int ops_traces_mod(struct ftrace_ops *ops) ··· 2894 2894 { 2895 2895 struct ftrace_page *pg; 2896 2896 struct dyn_ftrace *p; 2897 - cycle_t start, stop; 2897 + u64 start, stop; 2898 2898 unsigned long update_cnt = 0; 2899 2899 unsigned long rec_flags = 0; 2900 2900 int i;
+3 -3
kernel/trace/trace.c
··· 236 236 } 237 237 __setup("tp_printk", set_tracepoint_printk); 238 238 239 - unsigned long long ns2usecs(cycle_t nsec) 239 + unsigned long long ns2usecs(u64 nsec) 240 240 { 241 241 nsec += 500; 242 242 do_div(nsec, 1000); ··· 573 573 return read; 574 574 } 575 575 576 - static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu) 576 + static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu) 577 577 { 578 578 u64 ts; 579 579 ··· 587 587 return ts; 588 588 } 589 589 590 - cycle_t ftrace_now(int cpu) 590 + u64 ftrace_now(int cpu) 591 591 { 592 592 return buffer_ftrace_now(&global_trace.trace_buffer, cpu); 593 593 }
+4 -4
kernel/trace/trace.h
··· 159 159 unsigned long policy; 160 160 unsigned long rt_priority; 161 161 unsigned long skipped_entries; 162 - cycle_t preempt_timestamp; 162 + u64 preempt_timestamp; 163 163 pid_t pid; 164 164 kuid_t uid; 165 165 char comm[TASK_COMM_LEN]; ··· 177 177 struct trace_array *tr; 178 178 struct ring_buffer *buffer; 179 179 struct trace_array_cpu __percpu *data; 180 - cycle_t time_start; 180 + u64 time_start; 181 181 int cpu; 182 182 }; 183 183 ··· 689 689 } 690 690 #endif /* CONFIG_STACKTRACE */ 691 691 692 - extern cycle_t ftrace_now(int cpu); 692 + extern u64 ftrace_now(int cpu); 693 693 694 694 extern void trace_find_cmdline(int pid, char comm[]); 695 695 extern void trace_event_follow_fork(struct trace_array *tr, bool enable); ··· 736 736 #endif /* CONFIG_FTRACE_STARTUP_TEST */ 737 737 738 738 extern void *head_page(struct trace_array_cpu *data); 739 - extern unsigned long long ns2usecs(cycle_t nsec); 739 + extern unsigned long long ns2usecs(u64 nsec); 740 740 extern int 741 741 trace_vbprintk(unsigned long ip, const char *fmt, va_list args); 742 742 extern int
+2 -2
kernel/trace/trace_irqsoff.c
··· 298 298 /* 299 299 * Should this new latency be reported/recorded? 300 300 */ 301 - static bool report_latency(struct trace_array *tr, cycle_t delta) 301 + static bool report_latency(struct trace_array *tr, u64 delta) 302 302 { 303 303 if (tracing_thresh) { 304 304 if (delta < tracing_thresh) ··· 316 316 unsigned long parent_ip, 317 317 int cpu) 318 318 { 319 - cycle_t T0, T1, delta; 319 + u64 T0, T1, delta; 320 320 unsigned long flags; 321 321 int pc; 322 322
+2 -2
kernel/trace/trace_sched_wakeup.c
··· 358 358 /* 359 359 * Should this new latency be reported/recorded? 360 360 */ 361 - static bool report_latency(struct trace_array *tr, cycle_t delta) 361 + static bool report_latency(struct trace_array *tr, u64 delta) 362 362 { 363 363 if (tracing_thresh) { 364 364 if (delta < tracing_thresh) ··· 440 440 struct task_struct *prev, struct task_struct *next) 441 441 { 442 442 struct trace_array_cpu *data; 443 - cycle_t T0, T1, delta; 443 + u64 T0, T1, delta; 444 444 unsigned long flags; 445 445 long disabled; 446 446 int cpu;
+2 -2
lib/timerqueue.c
··· 48 48 while (*p) { 49 49 parent = *p; 50 50 ptr = rb_entry(parent, struct timerqueue_node, node); 51 - if (node->expires.tv64 < ptr->expires.tv64) 51 + if (node->expires < ptr->expires) 52 52 p = &(*p)->rb_left; 53 53 else 54 54 p = &(*p)->rb_right; ··· 56 56 rb_link_node(&node->node, parent, p); 57 57 rb_insert_color(&node->node, &head->head); 58 58 59 - if (!head->next || node->expires.tv64 < head->next->expires.tv64) { 59 + if (!head->next || node->expires < head->next->expires) { 60 60 head->next = node; 61 61 return true; 62 62 }
+16 -16
net/can/bcm.c
··· 199 199 200 200 seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' '); 201 201 202 - if (op->kt_ival1.tv64) 202 + if (op->kt_ival1) 203 203 seq_printf(m, "timeo=%lld ", 204 204 (long long)ktime_to_us(op->kt_ival1)); 205 205 206 - if (op->kt_ival2.tv64) 206 + if (op->kt_ival2) 207 207 seq_printf(m, "thr=%lld ", 208 208 (long long)ktime_to_us(op->kt_ival2)); 209 209 ··· 226 226 else 227 227 seq_printf(m, "[%u] ", op->nframes); 228 228 229 - if (op->kt_ival1.tv64) 229 + if (op->kt_ival1) 230 230 seq_printf(m, "t1=%lld ", 231 231 (long long)ktime_to_us(op->kt_ival1)); 232 232 233 - if (op->kt_ival2.tv64) 233 + if (op->kt_ival2) 234 234 seq_printf(m, "t2=%lld ", 235 235 (long long)ktime_to_us(op->kt_ival2)); 236 236 ··· 365 365 366 366 static void bcm_tx_start_timer(struct bcm_op *op) 367 367 { 368 - if (op->kt_ival1.tv64 && op->count) 368 + if (op->kt_ival1 && op->count) 369 369 hrtimer_start(&op->timer, 370 370 ktime_add(ktime_get(), op->kt_ival1), 371 371 HRTIMER_MODE_ABS); 372 - else if (op->kt_ival2.tv64) 372 + else if (op->kt_ival2) 373 373 hrtimer_start(&op->timer, 374 374 ktime_add(ktime_get(), op->kt_ival2), 375 375 HRTIMER_MODE_ABS); ··· 380 380 struct bcm_op *op = (struct bcm_op *)data; 381 381 struct bcm_msg_head msg_head; 382 382 383 - if (op->kt_ival1.tv64 && (op->count > 0)) { 383 + if (op->kt_ival1 && (op->count > 0)) { 384 384 385 385 op->count--; 386 386 if (!op->count && (op->flags & TX_COUNTEVT)) { ··· 398 398 } 399 399 bcm_can_tx(op); 400 400 401 - } else if (op->kt_ival2.tv64) 401 + } else if (op->kt_ival2) 402 402 bcm_can_tx(op); 403 403 404 404 bcm_tx_start_timer(op); ··· 459 459 lastdata->flags |= (RX_RECV|RX_THR); 460 460 461 461 /* throttling mode inactive ? */ 462 - if (!op->kt_ival2.tv64) { 462 + if (!op->kt_ival2) { 463 463 /* send RX_CHANGED to the user immediately */ 464 464 bcm_rx_changed(op, lastdata); 465 465 return; ··· 470 470 return; 471 471 472 472 /* first reception with enabled throttling mode */ 473 - if (!op->kt_lastmsg.tv64) 473 + if (!op->kt_lastmsg) 474 474 goto rx_changed_settime; 475 475 476 476 /* got a second frame inside a potential throttle period? */ ··· 537 537 if (op->flags & RX_NO_AUTOTIMER) 538 538 return; 539 539 540 - if (op->kt_ival1.tv64) 540 + if (op->kt_ival1) 541 541 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL); 542 542 } 543 543 ··· 643 643 return HRTIMER_RESTART; 644 644 } else { 645 645 /* rearm throttle handling */ 646 - op->kt_lastmsg = ktime_set(0, 0); 646 + op->kt_lastmsg = 0; 647 647 return HRTIMER_NORESTART; 648 648 } 649 649 } ··· 1005 1005 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); 1006 1006 1007 1007 /* disable an active timer due to zero values? */ 1008 - if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64) 1008 + if (!op->kt_ival1 && !op->kt_ival2) 1009 1009 hrtimer_cancel(&op->timer); 1010 1010 } 1011 1011 ··· 1189 1189 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); 1190 1190 1191 1191 /* disable an active timer due to zero value? 
*/ 1192 - if (!op->kt_ival1.tv64) 1192 + if (!op->kt_ival1) 1193 1193 hrtimer_cancel(&op->timer); 1194 1194 1195 1195 /* 1196 1196 * In any case cancel the throttle timer, flush 1197 1197 * potentially blocked msgs and reset throttle handling 1198 1198 */ 1199 - op->kt_lastmsg = ktime_set(0, 0); 1199 + op->kt_lastmsg = 0; 1200 1200 hrtimer_cancel(&op->thrtimer); 1201 1201 bcm_rx_thr_flush(op, 1); 1202 1202 } 1203 1203 1204 - if ((op->flags & STARTTIMER) && op->kt_ival1.tv64) 1204 + if ((op->flags & STARTTIMER) && op->kt_ival1) 1205 1205 hrtimer_start(&op->timer, op->kt_ival1, 1206 1206 HRTIMER_MODE_REL); 1207 1207 }
+1 -1
net/can/gw.c
··· 429 429 430 430 /* clear the skb timestamp if not configured the other way */ 431 431 if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP)) 432 - nskb->tstamp.tv64 = 0; 432 + nskb->tstamp = 0; 433 433 434 434 /* send to netdevice */ 435 435 if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
+2 -2
net/core/dev.c
··· 1731 1731 1732 1732 static inline void net_timestamp_set(struct sk_buff *skb) 1733 1733 { 1734 - skb->tstamp.tv64 = 0; 1734 + skb->tstamp = 0; 1735 1735 if (static_key_false(&netstamp_needed)) 1736 1736 __net_timestamp(skb); 1737 1737 } 1738 1738 1739 1739 #define net_timestamp_check(COND, SKB) \ 1740 1740 if (static_key_false(&netstamp_needed)) { \ 1741 - if ((COND) && !(SKB)->tstamp.tv64) \ 1741 + if ((COND) && !(SKB)->tstamp) \ 1742 1742 __net_timestamp(SKB); \ 1743 1743 } \ 1744 1744
+1 -1
net/core/skbuff.c
··· 4368 4368 */ 4369 4369 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 4370 4370 { 4371 - skb->tstamp.tv64 = 0; 4371 + skb->tstamp = 0; 4372 4372 skb->pkt_type = PACKET_HOST; 4373 4373 skb->skb_iif = 0; 4374 4374 skb->ignore_df = 0;
+2 -2
net/ipv4/tcp_output.c
··· 1038 1038 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); 1039 1039 1040 1040 /* Our usage of tstamp should remain private */ 1041 - skb->tstamp.tv64 = 0; 1041 + skb->tstamp = 0; 1042 1042 1043 1043 /* Cleanup our debris for IP stacks */ 1044 1044 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), ··· 3203 3203 #endif 3204 3204 3205 3205 /* Do not fool tcpdump (if any), clean our debris */ 3206 - skb->tstamp.tv64 = 0; 3206 + skb->tstamp = 0; 3207 3207 return skb; 3208 3208 } 3209 3209 EXPORT_SYMBOL(tcp_make_synack);
+1 -1
net/ipv6/exthdrs.c
··· 232 232 ipv6h->saddr = hao->addr; 233 233 hao->addr = tmp_addr; 234 234 235 - if (skb->tstamp.tv64 == 0) 235 + if (skb->tstamp == 0) 236 236 __net_timestamp(skb); 237 237 238 238 return true;
+1 -1
net/ipv6/mip6.c
··· 191 191 int allow = 0; 192 192 193 193 spin_lock_bh(&mip6_report_rl.lock); 194 - if (!ktime_equal(mip6_report_rl.stamp, stamp) || 194 + if (mip6_report_rl.stamp != stamp || 195 195 mip6_report_rl.iif != iif || 196 196 !ipv6_addr_equal(&mip6_report_rl.src, src) || 197 197 !ipv6_addr_equal(&mip6_report_rl.dst, dst)) {
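The mip6 hunk replaces ktime_equal() with a direct comparison; the helper only hid the union access and is dropped elsewhere in this series. For reference, its pre-removal form in include/linux/ktime.h was essentially:

static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
{
	return cmp1.tv64 == cmp2.tv64;
}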
+1 -1
net/ipx/af_ipx.c
··· 1809 1809 rc = skb_copy_datagram_msg(skb, sizeof(struct ipxhdr), msg, copied); 1810 1810 if (rc) 1811 1811 goto out_free; 1812 - if (skb->tstamp.tv64) 1812 + if (skb->tstamp) 1813 1813 sk->sk_stamp = skb->tstamp; 1814 1814 1815 1815 if (sipx) {
+2 -2
net/mac802154/util.c
··· 80 80 81 81 if (skb->len > max_sifs_size) 82 82 hrtimer_start(&local->ifs_timer, 83 - ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC), 83 + hw->phy->lifs_period * NSEC_PER_USEC, 84 84 HRTIMER_MODE_REL); 85 85 else 86 86 hrtimer_start(&local->ifs_timer, 87 - ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC), 87 + hw->phy->sifs_period * NSEC_PER_USEC, 88 88 HRTIMER_MODE_REL); 89 89 } else { 90 90 ieee802154_wake_queue(hw);
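Calls such as ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC) shrink to the bare nanosecond expression because, with a scalar ktime_t, the constructor just composes seconds and nanoseconds. A simplified sketch of the post-cleanup helper (the in-tree version additionally clamps secs at KTIME_SEC_MAX):

static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
	/* with ktime_t == s64, ktime_set(0, n) is just n nanoseconds */
	return secs * NSEC_PER_SEC + nsecs;
}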
+1 -1
net/netfilter/nf_conntrack_core.c
··· 783 783 /* set conntrack timestamp, if enabled. */ 784 784 tstamp = nf_conn_tstamp_find(ct); 785 785 if (tstamp) { 786 - if (skb->tstamp.tv64 == 0) 786 + if (skb->tstamp == 0) 787 787 __net_timestamp(skb); 788 788 789 789 tstamp->start = ktime_to_ns(skb->tstamp);
+1 -1
net/netfilter/xt_time.c
··· 168 168 * may happen that the same packet matches both rules if 169 169 * it arrived at the right moment before 13:00. 170 170 */ 171 - if (skb->tstamp.tv64 == 0) 171 + if (skb->tstamp == 0) 172 172 __net_timestamp((struct sk_buff *)skb); 173 173 174 174 stamp = ktime_to_ns(skb->tstamp);
+1 -1
net/sched/sch_cbq.c
··· 509 509 if (delay) { 510 510 ktime_t time; 511 511 512 - time = ktime_set(0, 0); 512 + time = 0; 513 513 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay)); 514 514 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED); 515 515 }
+1 -1
net/sched/sch_netem.c
··· 627 627 * from the network (tstamp will be updated). 628 628 */ 629 629 if (G_TC_FROM(skb->tc_verd) & AT_INGRESS) 630 - skb->tstamp.tv64 = 0; 630 + skb->tstamp = 0; 631 631 #endif 632 632 633 633 if (q->qdisc) {
+1 -1
net/sctp/transport.c
··· 72 72 */ 73 73 peer->rto = msecs_to_jiffies(net->sctp.rto_initial); 74 74 75 - peer->last_time_heard = ktime_set(0, 0); 75 + peer->last_time_heard = 0; 76 76 peer->last_time_ecne_reduced = jiffies; 77 77 78 78 peer->param_flags = SPP_HB_DISABLE |
+1 -1
net/socket.c
··· 668 668 669 669 /* Race occurred between timestamp enabling and packet 670 670 receiving. Fill in the current time for now. */ 671 - if (need_software_tstamp && skb->tstamp.tv64 == 0) 671 + if (need_software_tstamp && skb->tstamp == 0) 672 672 __net_timestamp(skb); 673 673 674 674 if (need_software_tstamp) {
+1 -1
net/sunrpc/svcsock.c
··· 574 574 } 575 575 len = svc_addr_len(svc_addr(rqstp)); 576 576 rqstp->rq_addrlen = len; 577 - if (skb->tstamp.tv64 == 0) { 577 + if (skb->tstamp == 0) { 578 578 skb->tstamp = ktime_get_real(); 579 579 /* Don't enable netstamp, sunrpc doesn't 580 580 need that much accuracy */
+1 -1
net/xfrm/xfrm_state.c
··· 1404 1404 if (x->curlft.bytes >= x->lft.hard_byte_limit || 1405 1405 x->curlft.packets >= x->lft.hard_packet_limit) { 1406 1406 x->km.state = XFRM_STATE_EXPIRED; 1407 - tasklet_hrtimer_start(&x->mtimer, ktime_set(0, 0), HRTIMER_MODE_REL); 1407 + tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL); 1408 1408 return -EINVAL; 1409 1409 } 1410 1410
+1 -1
sound/core/hrtimer.c
··· 58 58 59 59 /* calculate the drift */ 60 60 delta = ktime_sub(hrt->base->get_time(), hrtimer_get_expires(hrt)); 61 - if (delta.tv64 > 0) 61 + if (delta > 0) 62 62 ticks += ktime_divns(delta, ticks * resolution); 63 63 64 64 snd_timer_interrupt(stime->timer, ticks);
+1 -1
sound/drivers/pcsp/pcsp_lib.c
··· 166 166 atomic_set(&chip->timer_active, 1); 167 167 chip->thalf = 0; 168 168 169 - hrtimer_start(&pcsp_chip.timer, ktime_set(0, 0), HRTIMER_MODE_REL); 169 + hrtimer_start(&pcsp_chip.timer, 0, HRTIMER_MODE_REL); 170 170 return 0; 171 171 } 172 172
+3 -3
sound/firewire/lib.c
··· 114 114 snd_rawmidi_transmit_ack(substream, port->consume_bytes); 115 115 else if (!rcode_is_permanent_error(rcode)) 116 116 /* To start next transaction immediately for recovery. */ 117 - port->next_ktime = ktime_set(0, 0); 117 + port->next_ktime = 0; 118 118 else 119 119 /* Don't continue processing. */ 120 120 port->error = true; ··· 156 156 if (port->consume_bytes <= 0) { 157 157 /* Do it in next chance, immediately. */ 158 158 if (port->consume_bytes == 0) { 159 - port->next_ktime = ktime_set(0, 0); 159 + port->next_ktime = 0; 160 160 schedule_work(&port->work); 161 161 } else { 162 162 /* Fatal error. */ ··· 219 219 port->addr = addr; 220 220 port->fill = fill; 221 221 port->idling = true; 222 - port->next_ktime = ktime_set(0, 0); 222 + port->next_ktime = 0; 223 223 port->error = false; 224 224 225 225 INIT_WORK(&port->work, midi_port_work);
+3 -3
sound/hda/hdac_stream.c
··· 465 465 } 466 466 EXPORT_SYMBOL_GPL(snd_hdac_stream_set_params); 467 467 468 - static cycle_t azx_cc_read(const struct cyclecounter *cc) 468 + static u64 azx_cc_read(const struct cyclecounter *cc) 469 469 { 470 470 struct hdac_stream *azx_dev = container_of(cc, struct hdac_stream, cc); 471 471 ··· 473 473 } 474 474 475 475 static void azx_timecounter_init(struct hdac_stream *azx_dev, 476 - bool force, cycle_t last) 476 + bool force, u64 last) 477 477 { 478 478 struct timecounter *tc = &azx_dev->tc; 479 479 struct cyclecounter *cc = &azx_dev->cc; ··· 523 523 struct snd_pcm_runtime *runtime = azx_dev->substream->runtime; 524 524 struct hdac_stream *s; 525 525 bool inited = false; 526 - cycle_t cycle_last = 0; 526 + u64 cycle_last = 0; 527 527 int i = 0; 528 528 529 529 list_for_each_entry(s, &bus->stream_list, list) {
+1 -1
sound/sh/sh_dac_audio.c
··· 87 87 88 88 static void dac_audio_set_rate(struct snd_sh_dac *chip) 89 89 { 90 - chip->wakeups_per_second = ktime_set(0, 1000000000 / chip->rate); 90 + chip->wakeups_per_second = 1000000000 / chip->rate; 91 91 } 92 92 93 93
+3 -3
virt/kvm/arm/arch_timer.c
··· 39 39 vcpu->arch.timer_cpu.active_cleared_last = false; 40 40 } 41 41 42 - static cycle_t kvm_phys_timer_read(void) 42 + static u64 kvm_phys_timer_read(void) 43 43 { 44 44 return timecounter->cc->read(timecounter->cc); 45 45 } ··· 102 102 103 103 static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu) 104 104 { 105 - cycle_t cval, now; 105 + u64 cval, now; 106 106 107 107 cval = vcpu->arch.timer_cpu.cntv_cval; 108 108 now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; ··· 155 155 bool kvm_timer_should_fire(struct kvm_vcpu *vcpu) 156 156 { 157 157 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 158 - cycle_t cval, now; 158 + u64 cval, now; 159 159 160 160 if (!kvm_timer_irq_can_fire(vcpu)) 161 161 return false;