// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/cputime.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/processor.h>
#include <linux/mc146818rtc.h>
#include <linux/platform_device.h>
#include <asm/trace.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/mce.h>
/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name         = "timebase",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = timebase_read,
	.vdso_clock_mode	= VDSO_CLOCKMODE_ARCHTIMER,
};
#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
EXPORT_SYMBOL_GPL(decrementer_max); /* for KVM HDEC */
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
	.name			= "decrementer",
	.rating			= 200,
	.irq			= 0,
	.set_next_event		= decrementer_set_next_event,
	.set_state_oneshot_stopped = decrementer_shutdown,
	.set_state_shutdown	= decrementer_shutdown,
	.tick_resume		= decrementer_shutdown,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);
/*
 * This always puts next_tb beyond now, so the clock event will never fire
 * with the usual comparison, no need for a separate test for stopped.
 */
#define DEC_CLOCKEVENT_STOPPED ~0ULL
DEFINE_PER_CPU(u64, decrementers_next_tb) = DEC_CLOCKEVENT_STOPPED;
EXPORT_SYMBOL_GPL(decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
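
/*
 * Note (illustrative, not from the original source): DEC_CLOCKEVENT_STOPPED
 * is ~0ULL, the largest u64, so while a decrementer is stopped the
 * "now >= *next_tb" check in timer_interrupt() stays false for any
 * realistic timebase value and the event handler is never invoked.
 */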
#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
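
/*
 * Illustrative note (not in the original source): an "xsec" is
 * 1/XSEC_PER_SEC (2^-20) of a second, so SCALE_XSEC(xsec, max) yields
 * the fraction xsec/2^20 of the range "max". The 32-bit variant gets
 * there without a 64-bit divide: ((xsec << 12) * max) >> 32 equals
 * (xsec * max) >> 20, with mulhwu() supplying the high word of the
 * 32x32-bit product.
 */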
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;
extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

bool tb_invalid;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static inline unsigned long read_spurr(unsigned long tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}
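
/*
 * Background note (illustrative): the PURR counts this thread's share of
 * processor cycles, while the SPURR ("scaled" PURR) additionally slows
 * down or speeds up with frequency scaling and throttling, which is why
 * it is preferred for scaled cputime when available.
 */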
/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
					unsigned long now, unsigned long stime)
{
	unsigned long stime_scaled = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	unsigned long nowscaled, deltascaled;
	unsigned long utime, utime_scaled;

	nowscaled = read_spurr(now);
	deltascaled = nowscaled - acct->startspurr;
	acct->startspurr = nowscaled;
	utime = acct->utime - acct->utime_sspurr;
	acct->utime_sspurr = acct->utime;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval. The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
	stime_scaled = stime;
	utime_scaled = utime;
	if (deltascaled != stime + utime) {
		if (utime) {
			stime_scaled = deltascaled * stime / (stime + utime);
			utime_scaled = deltascaled - stime_scaled;
		} else {
			stime_scaled = deltascaled;
		}
	}
	acct->utime_scaled += utime_scaled;
#endif

	return stime_scaled;
}
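
/*
 * Worked example for the apportioning above (illustrative, not from the
 * original source): with stime = 6 and utime = 4 timebase ticks but only
 * deltascaled = 5 SPURR ticks, the split keeps the 6:4 ratio, giving
 * stime_scaled = 5 * 6 / 10 = 3 and utime_scaled = 5 - 3 = 2.
 */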
static unsigned long vtime_delta(struct cpu_accounting_data *acct,
				 unsigned long *stime_scaled,
				 unsigned long *steal_time)
{
	unsigned long now, stime;

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	stime = now - acct->starttime;
	acct->starttime = now;

	*stime_scaled = vtime_delta_scaled(acct, now, stime);

	if (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
	    firmware_has_feature(FW_FEATURE_SPLPAR))
		*steal_time = pseries_calculate_stolen_time(now);
	else
		*steal_time = 0;

	return stime;
}
static void vtime_delta_kernel(struct cpu_accounting_data *acct,
			       unsigned long *stime, unsigned long *stime_scaled)
{
	unsigned long steal_time;

	*stime = vtime_delta(acct, stime_scaled, &steal_time);
	*stime -= min(*stime, steal_time);
	acct->steal_time += steal_time;
}
void vtime_account_kernel(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);

	if (tsk->flags & PF_VCPU) {
		acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->utime_scaled += stime_scaled;
#endif
	} else {
		acct->stime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->stime_scaled += stime_scaled;
#endif
	}
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);
void vtime_account_idle(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(acct, &stime_scaled, &steal_time);
	acct->idle_time += stime + steal_time;
}
static void vtime_account_irq_field(struct cpu_accounting_data *acct,
				    unsigned long *field)
{
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);
	*field += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	acct->stime_scaled += stime_scaled;
#endif
}
void vtime_account_softirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	vtime_account_irq_field(acct, &acct->softirq_time);
}

void vtime_account_hardirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	vtime_account_irq_field(acct, &acct->hardirq_time);
}
static void vtime_flush_scaled(struct task_struct *tsk,
			       struct cpu_accounting_data *acct)
{
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	if (acct->utime_scaled)
		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
	if (acct->stime_scaled)
		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

	acct->utime_scaled = 0;
	acct->utime_sspurr = 0;
	acct->stime_scaled = 0;
#endif
}
/*
 * Account the whole cputime accumulated in the paca
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_kernel/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	if (acct->utime)
		account_user_time(tsk, cputime_to_nsecs(acct->utime));

	if (acct->gtime)
		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));

	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
		account_steal_time(cputime_to_nsecs(acct->steal_time));
		acct->steal_time = 0;
	}

	if (acct->idle_time)
		account_idle_time(cputime_to_nsecs(acct->idle_time));

	if (acct->stime)
		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
					  CPUTIME_SYSTEM);

	if (acct->hardirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
					  CPUTIME_IRQ);
	if (acct->softirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
					  CPUTIME_SOFTIRQ);

	vtime_flush_scaled(tsk, acct);

	acct->utime = 0;
	acct->gtime = 0;
	acct->idle_time = 0;
	acct->stime = 0;
	acct->hardirq_time = 0;
	acct->softirq_time = 0;
}
/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void vtime_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_kernel(prev);

	vtime_flush(prev);

	if (!IS_ENABLED(CONFIG_PPC64)) {
		struct cpu_accounting_data *acct = get_accounting(current);
		struct cpu_accounting_data *acct0 = get_accounting(prev);

		acct->starttime = acct0->starttime;
	}
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
void __no_kcsan __delay(unsigned long loops)
{
	unsigned long start;

	spin_begin();
	if (tb_invalid) {
		/*
		 * TB is in error state and isn't ticking anymore.
		 * HMI handler was unable to recover from TB error.
		 * Return immediately, so that kernel won't get stuck here.
		 */
		spin_cpu_relax();
	} else {
		start = mftb();
		while (mftb() - start < loops)
			spin_cpu_relax();
	}
	spin_end();
}
EXPORT_SYMBOL(__delay);
void __no_kcsan udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
#ifdef CONFIG_PROFILING
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
	: "=r" (x)
	: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */
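
/*
 * Note (illustrative): on 64-bit, r13 holds the per-cpu PACA pointer in
 * the kernel, so the lbz/stb instructions above read and write the
 * irq_work_pending byte at a fixed offset from r13 without any function
 * call or address computation.
 */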
void arch_irq_work_raise(void)
{
	/*
	 * 64-bit code that uses irq soft-mask can just cause an immediate
	 * interrupt here that gets soft masked, if this is called under
	 * local_irq_disable(). It might be possible to prevent that happening
	 * by noticing interrupts are disabled and setting decrementer pending
	 * to be replayed when irqs are enabled. The problem there is that
	 * tracing can call irq_work_raise, including in code that does low
	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
	 * which could get tangled up if we're messing with the same state
	 * here.
	 */
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}
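
/*
 * How this fires (illustrative summary): setting the decrementer to 1
 * raises a decrementer exception almost immediately; timer_interrupt()
 * then sees test_irq_work_pending(), clears the flag and runs the
 * queued work via irq_work_run().
 */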
static void set_dec_or_work(u64 val)
{
	set_dec(val);
	/* We may have raced with new irq work */
	if (unlikely(test_irq_work_pending()))
		set_dec(1);
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

static void set_dec_or_work(u64 val)
{
	set_dec(val);
}
#endif /* CONFIG_IRQ_WORK */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void timer_rearm_host_dec(u64 now)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	WARN_ON_ONCE(!arch_irqs_disabled());
	WARN_ON_ONCE(mfmsr() & MSR_EE);

	if (now >= *next_tb) {
		local_paca->irq_happened |= PACA_IRQ_DEC;
	} else {
		now = *next_tb - now;
		if (now > decrementer_max)
			now = decrementer_max;
		set_dec_or_work(now);
	}
}
EXPORT_SYMBOL_GPL(timer_rearm_host_dec);
#endif
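
/*
 * Usage note (illustrative): KVM HV uses this when returning from a guest
 * with the host decrementer expired or clobbered. If the host's next
 * event is already due, a decrementer interrupt is marked pending in
 * irq_happened for soft-mask replay; otherwise the decrementer is simply
 * reprogrammed for the remaining interval.
 */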
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
{
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct pt_regs *old_regs;
	u64 now;

	/*
	 * Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these.
	 */
	if (unlikely(!cpu_online(smp_processor_id()))) {
		set_dec(decrementer_max);
		return;
	}

	/* Conditionally hard-enable interrupts. */
	if (should_hard_irq_enable(regs)) {
		/*
		 * Ensure a positive value is written to the decrementer, or
		 * else some CPUs will continue to take decrementer exceptions.
		 * When the PPC_WATCHDOG (decrementer based) is configured,
		 * keep this at most 31 bits, which is about 4 seconds on most
		 * systems, which gives the watchdog a chance of catching timer
		 * interrupt hard lockups.
		 */
		if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
			set_dec(0x7fffffff);
		else
			set_dec(decrementer_max);

		do_hard_irq_enable();
	}

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);

	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		mce_run_irq_context_handlers();
		irq_work_run();
	}

	now = get_tb();
	if (now >= *next_tb) {
		evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		now = *next_tb - now;
		if (now > decrementer_max)
			now = decrementer_max;
		set_dec_or_work(now);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

	trace_timer_interrupt_exit(regs);

	set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void timer_broadcast_interrupt(void)
{
	tick_receive_broadcast();
	__this_cpu_inc(irq_stat.broadcast_irqs_event);
}
#endif
#ifdef CONFIG_SUSPEND
/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();

	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(decrementer_max);
	local_irq_disable();
	set_dec(decrementer_max);
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	local_irq_enable();

	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif
unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);
/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
notrace unsigned long long sched_clock(void)
{
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
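
/*
 * Worked example (illustrative, not from the original source): with a
 * 512 MHz timebase, time_init() computes 10^9 * 2^64 / (512 * 10^6),
 * the 64.64 fixed-point value 1.953125. Since that is >= 1.0 it is
 * shifted right once, leaving tb_to_ns_shift = 1 and tb_to_ns_scale
 * representing 0.9765625. sched_clock() then returns
 * mulhdu(ticks, scale) << 1 = ticks * 1.953125, i.e. the tick count
 * converted to nanoseconds at 512 MHz.
 */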
#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for a virtualised
 * kernel.
 * Uses the VTB register if available otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host since KVM does not switch in host
	 * timebase into the VTB when it takes a guest off the CPU, reading the
	 * VTB would result in reading 'last switched out' guest VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES checked, it
	 * would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is a next best approximation without a VTB.
	 * On a host which is running bare metal there should never be any stolen
	 * time and on a host which doesn't do any virtualisation TB *should* equal
	 * VTB so it makes no difference anyway.
	 */
	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif
static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}
static void start_cpu_decrementer(void)
{
#ifdef CONFIG_BOOKE_OR_40x
	unsigned int tcr;

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	tcr = mfspr(SPRN_TCR);
	/*
	 * The watchdog may have already been enabled by u-boot. So leave
	 * TCR[WP] (Watchdog Period) alone.
	 */
	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
	tcr |= TCR_DIE;		/* Enable decrementer */
	mtspr(SPRN_TCR, tcr);
#endif
}
void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}
int update_persistent_clock64(struct timespec64 now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);

	return ppc_md.set_rtc_time(&tm);
}
static void __read_persistent_clock(struct timespec64 *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = rtc_tm_to_time64(&tm);
}
void read_persistent_clock64(struct timespec64 *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}
/* clocksource code */
static notrace u64 timebase_read(struct clocksource *cs)
{
	return (u64)get_tb();
}

static void __init clocksource_init(void)
{
	struct clocksource *clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__this_cpu_write(decrementers_next_tb, get_tb() + evt);
	set_dec_or_work(evt);

	return 0;
}

static int decrementer_shutdown(struct clock_event_device *dev)
{
	__this_cpu_write(decrementers_next_tb, DEC_CLOCKEVENT_STOPPED);
	set_dec_or_work(decrementer_max);

	return 0;
}
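
/*
 * Note (illustrative): "evt" arrives in timebase ticks; the clockevents
 * core converts a nanosecond expiry to ticks using the mult/shift pair
 * set up by clockevents_config_and_register() below, so this handler
 * only has to record the target tick and program the decrementer.
 */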
static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	/* Set values for KVM, see kvm_emulate_dec() */
	decrementer_clockevent.mult = dec->mult;
	decrementer_clockevent.shift = dec->shift;
}
static void enable_large_decrementer(void)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
		return;

	/*
	 * If we're running as the hypervisor we need to enable the LD manually
	 * otherwise firmware should have done it for us.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}
static void __init set_decrementer_max(void)
{
	struct device_node *cpu;
	u32 bits = 32;

	/* Prior to ISAv3 the decrementer is always 32 bit */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	cpu = of_find_node_by_type(NULL, "cpu");

	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
		if (bits > 64 || bits < 32) {
			pr_warn("time_init: firmware supplied invalid ibm,dec-bits");
			bits = 32;
		}

		/* calculate the signed maximum given this many bits */
		decrementer_max = (1ul << (bits - 1)) - 1;
	}

	of_node_put(cpu);

	pr_info("time_init: %u bit decrementer (max: %llx)\n",
		bits, decrementer_max);
}
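
/*
 * Example (illustrative, not from the original source): an ISAv3 CPU
 * whose firmware reports ibm,dec-bits = 56 ends up with
 * decrementer_max = (1ul << 55) - 1, since the decrementer is a signed
 * counter and only the positive half of its range is usable.
 */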
static void __init init_decrementer_clockevent(void)
{
	register_decrementer_clockevent(smp_processor_id());
}

void secondary_cpu_time_init(void)
{
	/* Enable and test the large decrementer for this cpu */
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make unrelated change to move snapshot_timebase
	 * call here ! */
	register_decrementer_clockevent(smp_processor_id());
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	/* Normal PowerPC with timebase register */
	if (ppc_md.calibrate_decr)
		ppc_md.calibrate_decr();
	else
		generic_calibrate_decr();

	printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
	       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
	printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
	       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* initialise and enable the large decrementer (if we have one) */
	set_decrementer_max();
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
	tick_setup_hrtimer_broadcast();

	of_clk_init(NULL);
	enable_sched_clock_irqtime();
}
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
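
/*
 * Note (illustrative): this is schoolbook long division in base 2^32.
 * At each step do_div() replaces the running value with its quotient and
 * returns the remainder, which is shifted up one 32-bit "digit" and
 * combined with the next digit of the dividend. w, x, y and z thus end
 * up as the four 32-bit digits of the 128-bit quotient.
 */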
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) use loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	ppc_md.get_rtc_time(tm);
	return 0;
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	if (!ppc_md.set_rtc_time)
		return -EOPNOTSUPP;

	if (ppc_md.set_rtc_time(tm) < 0)
		return -EOPNOTSUPP;

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}

device_initcall(rtc_init);
#endif