Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fix from Thomas Gleixner:
"Serialize the registration of a new sched_clock in the currently ARM
only generic sched_clock facility to avoid sched_clock havoc"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched_clock: Prevent callers from seeing half-updated data

+29 -17
+29 -17
kernel/time/sched_clock.c
··· 116 116 void __init sched_clock_register(u64 (*read)(void), int bits, 117 117 unsigned long rate) 118 118 { 119 + u64 res, wrap, new_mask, new_epoch, cyc, ns; 120 + u32 new_mult, new_shift; 121 + ktime_t new_wrap_kt; 119 122 unsigned long r; 120 - u64 res, wrap; 121 123 char r_unit; 122 124 123 125 if (cd.rate > rate) 124 126 return; 125 127 126 128 WARN_ON(!irqs_disabled()); 127 - read_sched_clock = read; 128 - sched_clock_mask = CLOCKSOURCE_MASK(bits); 129 - cd.rate = rate; 130 129 131 130 /* calculate the mult/shift to convert counter ticks to ns. */ 132 - clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600); 131 + clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600); 132 + 133 + new_mask = CLOCKSOURCE_MASK(bits); 134 + 135 + /* calculate how many ns until we wrap */ 136 + wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask); 137 + new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); 138 + 139 + /* update epoch for new counter and update epoch_ns from old counter*/ 140 + new_epoch = read(); 141 + cyc = read_sched_clock(); 142 + ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, 143 + cd.mult, cd.shift); 144 + 145 + raw_write_seqcount_begin(&cd.seq); 146 + read_sched_clock = read; 147 + sched_clock_mask = new_mask; 148 + cd.rate = rate; 149 + cd.wrap_kt = new_wrap_kt; 150 + cd.mult = new_mult; 151 + cd.shift = new_shift; 152 + cd.epoch_cyc = new_epoch; 153 + cd.epoch_ns = ns; 154 + raw_write_seqcount_end(&cd.seq); 133 155 134 156 r = rate; 135 157 if (r >= 4000000) { ··· 163 141 } else 164 142 r_unit = ' '; 165 143 166 - /* calculate how many ns until we wrap */ 167 - wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask); 168 - cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); 169 - 170 144 /* calculate the ns resolution of this counter */ 171 - res = cyc_to_ns(1ULL, cd.mult, cd.shift); 145 + res = cyc_to_ns(1ULL, new_mult, new_shift); 146 + 172 147 pr_info("sched_clock: %u bits at %lu%cHz, 
resolution %lluns, wraps every %lluns\n", 173 148 bits, r, r_unit, res, wrap); 174 - 175 - update_sched_clock(); 176 - 177 - /* 178 - * Ensure that sched_clock() starts off at 0ns 179 - */ 180 - cd.epoch_ns = 0; 181 149 182 150 /* Enable IRQ time accounting if we have a fast enough sched_clock */ 183 151 if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))