Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/include/linux/timecounter.h
 *
 * based on code that migrated away from
 * linux/include/linux/clocksource.h
 */
#ifndef _LINUX_TIMECOUNTER_H
#define _LINUX_TIMECOUNTER_H

#include <linux/types.h>

13/* simplify initialization of mask field */
14#define CYCLECOUNTER_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
15
16/**
17 * struct cyclecounter - hardware abstraction for a free running counter
18 * Provides completely state-free accessors to the underlying hardware.
19 * Depending on which hardware it reads, the cycle counter may wrap
20 * around quickly. Locking rules (if necessary) have to be defined
21 * by the implementor and user of specific instances of this API.
22 *
23 * @read: returns the current cycle value
24 * @mask: bitmask for two's complement
25 * subtraction of non-64-bit counters,
26 * see CYCLECOUNTER_MASK() helper macro
27 * @mult: cycle to nanosecond multiplier
28 * @shift: cycle to nanosecond divisor (power of two)
29 */
30struct cyclecounter {
31 u64 (*read)(struct cyclecounter *cc);
32 u64 mask;
33 u32 mult;
34 u32 shift;
35};
36
37/**
38 * struct timecounter - layer above a &struct cyclecounter which counts nanoseconds
39 * Contains the state needed by timecounter_read() to detect
40 * cycle counter wrap around. Initialize with
41 * timecounter_init(). Also used to convert cycle counts into the
42 * corresponding nanosecond counts with timecounter_cyc2time(). Users
43 * of this code are responsible for initializing the underlying
44 * cycle counter hardware, locking issues and reading the time
45 * more often than the cycle counter wraps around. The nanosecond
46 * counter will only wrap around after ~585 years.
47 *
48 * @cc: the cycle counter used by this instance
49 * @cycle_last: most recent cycle counter value seen by
50 * timecounter_read()
51 * @nsec: continuously increasing count
52 * @mask: bit mask for maintaining the 'frac' field
53 * @frac: accumulated fractional nanoseconds
54 */
55struct timecounter {
56 struct cyclecounter *cc;
57 u64 cycle_last;
58 u64 nsec;
59 u64 mask;
60 u64 frac;
61};
62
63/**
64 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
65 * @cc: Pointer to cycle counter.
66 * @cycles: Cycles
67 * @mask: bit mask for maintaining the 'frac' field
68 * @frac: pointer to storage for the fractional nanoseconds.
69 *
70 * Returns: cycle counter cycles converted to nanoseconds
71 */
72static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
73 u64 cycles, u64 mask, u64 *frac)
74{
75 u64 ns = (u64) cycles;
76
77 ns = (ns * cc->mult) + *frac;
78 *frac = ns & mask;
79 return ns >> cc->shift;
80}
81
82/**
83 * timecounter_adjtime - Shifts the time of the clock.
84 * @tc: The &struct timecounter to adjust
85 * @delta: Desired change in nanoseconds.
86 */
87static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
88{
89 tc->nsec += delta;
90}
91
92/**
93 * timecounter_init - initialize a time counter
94 * @tc: Pointer to time counter which is to be initialized/reset
95 * @cc: A cycle counter, ready to be used.
96 * @start_tstamp: Arbitrary initial time stamp.
97 *
98 * After this call the current cycle register (roughly) corresponds to
99 * the initial time stamp. Every call to timecounter_read() increments
100 * the time stamp counter by the number of elapsed nanoseconds.
101 */
102extern void timecounter_init(struct timecounter *tc,
103 struct cyclecounter *cc,
104 u64 start_tstamp);
105
106/**
107 * timecounter_read - return nanoseconds elapsed since timecounter_init()
108 * plus the initial time stamp
109 * @tc: Pointer to time counter.
110 *
111 * In other words, keeps track of time since the same epoch as
112 * the function which generated the initial time stamp.
113 *
114 * Returns: nanoseconds since the initial time stamp
115 */
116extern u64 timecounter_read(struct timecounter *tc);
117
118/*
119 * This is like cyclecounter_cyc2ns(), but it is used for computing a
120 * time previous to the time stored in the cycle counter.
121 */
122static inline u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, u64 cycles, u64 frac)
123{
124 return ((cycles * cc->mult) - frac) >> cc->shift;
125}
126
127/**
128 * timecounter_cyc2time - convert a cycle counter to same
129 * time base as values returned by
130 * timecounter_read()
131 * @tc: Pointer to time counter.
132 * @cycle_tstamp: a value returned by tc->cc->read()
133 *
134 * Cycle counts that are converted correctly as long as they
135 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
136 * with "max cycle count" == cs->mask+1.
137 *
138 * This allows conversion of cycle counter values which were generated
139 * in the past.
140 *
141 * Returns: cycle counter converted to nanoseconds since the initial time stamp
142 */
143static inline u64 timecounter_cyc2time(const struct timecounter *tc, u64 cycle_tstamp)
144{
145 const struct cyclecounter *cc = tc->cc;
146 u64 delta = (cycle_tstamp - tc->cycle_last) & cc->mask;
147 u64 nsec = tc->nsec, frac = tc->frac;
148
149 /*
150 * Instead of always treating cycle_tstamp as more recent than
151 * tc->cycle_last, detect when it is too far in the future and
152 * treat it as old time stamp instead.
153 */
154 if (unlikely(delta > cc->mask / 2)) {
155 delta = (tc->cycle_last - cycle_tstamp) & cc->mask;
156 nsec -= cc_cyc2ns_backwards(cc, delta, frac);
157 } else {
158 nsec += cyclecounter_cyc2ns(cc, delta, tc->mask, &frac);
159 }
160
161 return nsec;
162}

#endif /* _LINUX_TIMECOUNTER_H */