// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates. The lowest
 *			bit is the index for @read_data.
 * @read_data:		Data required to read from sched_clock.
 * @wrap_kt:		Duration for which clock can run before wrapping.
 * @rate:		Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
	seqcount_latch_t	seq;
	struct clock_read_data	read_data[2];
	ktime_t			wrap_kt;
	unsigned long		rate;

	u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);
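
/*
 * Note: "irqtime=" is exposed as a core boot parameter: irqtime=1 forces
 * IRQ time accounting on, irqtime=0 keeps it off, and the default of -1
 * enables it automatically once a fast enough (>= 1 MHz) clock is
 * registered; see sched_clock_register() below.
 */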

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
			  .read_sched_clock = jiffy_sched_clock_read, },
	.actual_read_sched_clock = jiffy_sched_clock_read,
};
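
/*
 * With the default jiffy clock above, shift is 0 and mult is NSEC_PER_SEC/HZ,
 * so e.g. at HZ=250 each jiffy advances sched_clock() by 4,000,000 ns; the
 * resolution is a full tick until a real counter is registered.
 */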

static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
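
/*
 * cyc_to_ns() does fixed-point scaling: ns ~= cyc * mult / 2^shift, with
 * mult/shift chosen by clocks_calc_mult_shift() in sched_clock_register().
 * Worked example (illustrative values, not from a real platform): a 16 MHz
 * counter ticks every 62.5 ns, representable exactly as mult=125, shift=1,
 * so cyc_to_ns(2, 125, 1) == (2 * 125) >> 1 == 125 ns.
 */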

notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
	*seq = raw_read_seqcount_latch(&cd.seq);
	return cd.read_data + (*seq & 1);
}

notrace int sched_clock_read_retry(unsigned int seq)
{
	return raw_read_seqcount_latch_retry(&cd.seq, seq);
}
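
/*
 * External callers pair these two helpers in a retry loop; a minimal sketch
 * (not a verbatim in-tree user):
 *
 *	struct clock_read_data *rd;
 *	unsigned int seq;
 *	u64 cyc;
 *
 *	do {
 *		rd = sched_clock_read_begin(&seq);
 *		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
 *		      rd->sched_clock_mask;
 *	} while (sched_clock_read_retry(seq));
 */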

unsigned long long noinstr sched_clock_noinstr(void)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 cyc, res;

	do {
		seq = raw_read_seqcount_latch(&cd.seq);
		rd = cd.read_data + (seq & 1);

		/*
		 * Masking with sched_clock_mask keeps the delta correct even
		 * after a counter narrower than 64 bits wraps.
		 */
		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (raw_read_seqcount_latch_retry(&cd.seq, seq));

	return res;
}

unsigned long long notrace sched_clock(void)
{
	unsigned long long ns;

	preempt_disable_notrace();
	ns = sched_clock_noinstr();
	preempt_enable_notrace();

	return ns;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;

	/* steer readers towards the odd copy */
	raw_write_seqcount_latch(&cd.seq);

	/* now it's safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
	raw_write_seqcount_latch(&cd.seq);
}
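
/*
 * Why this never tears, step by step: each raw_write_seqcount_latch()
 * increments cd.seq, flipping the copy (seq & 1) that readers are steered
 * to. While the writer rewrites read_data[1], readers still use the stable
 * read_data[0]; after the first flip readers use read_data[1] while
 * read_data[0] is rewritten. A reader that straddles a flip sees the
 * sequence change and retries, so even an NMI arriving mid-update reads a
 * consistent snapshot.
 */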

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
	u64 cyc;
	u64 ns;
	struct clock_read_data rd;

	rd = cd.read_data[0];

	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

	rd.epoch_ns = ns;
	rd.epoch_cyc = cyc;

	update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);

	return HRTIMER_RESTART;
}
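
/*
 * The poll period is cd.wrap_kt, i.e. the longest the counter can run before
 * (cyc - epoch_cyc) becomes ambiguous, so re-arming the timer here keeps the
 * epoch refreshed at least once per counter wrap.
 */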

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r, flags;
	char r_unit;
	struct clock_read_data rd;

	if (cd.rate > rate)
		return;

	/* Cannot register a sched_clock with interrupts on */
	local_irq_save(flags);

	/* Calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* Calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);
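
	/*
	 * Rough orientation for the wrap figure (clocks_calc_max_nsecs() may
	 * apply an additional safety margin): a 32-bit counter at 1 MHz holds
	 * 2^32 microseconds, so it wraps after about 4295 s, i.e. roughly
	 * 71.6 minutes.
	 */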

	rd = cd.read_data[0];

	/* Update epoch for new counter and update 'epoch_ns' from old counter */
	new_epoch = read();
	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
	cd.actual_read_sched_clock = read;

	rd.read_sched_clock	= read;
	rd.sched_clock_mask	= new_mask;
	rd.mult			= new_mult;
	rd.shift		= new_shift;
	rd.epoch_cyc		= new_epoch;
	rd.epoch_ns		= ns;

	update_clock_read_data(&rd);

	if (sched_clock_timer.function != NULL) {
		/* update timeout for clock wrap */
		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
			      HRTIMER_MODE_REL_HARD);
	}

	r = rate;
	if (r >= 4000000) {
		r = DIV_ROUND_CLOSEST(r, 1000000);
		r_unit = 'M';
	} else if (r >= 4000) {
		r = DIV_ROUND_CLOSEST(r, 1000);
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}

	/* Calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	local_irq_restore(flags);

	pr_debug("Registered %pS as sched_clock source\n", read);
}
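
/*
 * A registration sketch for illustration (my_timer_read and the 56-bit /
 * 24 MHz figures are hypothetical, not from an in-tree driver):
 *
 *	static u64 notrace my_timer_read(void)
 *	{
 *		return my_counter_read(); // hypothetical MMIO counter read
 *	}
 *
 *	sched_clock_register(my_timer_read, 56, 24000000);
 *
 * The read function should be notrace, since sched_clock() itself must not
 * be traced.
 */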

void __init generic_sched_clock_init(void)
{
	/*
	 * If no sched_clock() function has been provided at that point,
	 * make it the final one.
	 */
	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
	unsigned int seq = raw_read_seqcount_latch(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
}

int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;

	return 0;
}

void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	rd->epoch_cyc = cd.actual_read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
	rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);

	return 0;
}
device_initcall(sched_clock_syscore_init);