// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Google, Inc.
 */

#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/sched_clock.h>
#include <linux/time.h>

#include "timer-of.h"

#ifdef CONFIG_ARM
#include <asm/mach/time.h>
#endif

#define RTC_SECONDS             0x08
#define RTC_SHADOW_SECONDS      0x0c
#define RTC_MILLISECONDS        0x10

#define TIMERUS_CNTR_1US        0x10
#define TIMERUS_USEC_CFG        0x14
#define TIMERUS_CNTR_FREEZE     0x4c

#define TIMER_PTV               0x0
#define TIMER_PTV_EN            BIT(31)
#define TIMER_PTV_PER           BIT(30)
#define TIMER_PCR               0x4
#define TIMER_PCR_INTR_CLR      BIT(30)

#ifdef CONFIG_ARM
#define TIMER_CPU0              0x50 /* TIMER3 */
#else
#define TIMER_CPU0              0x90 /* TIMER10 */
#define TIMER10_IRQ_IDX         10
#define IRQ_IDX_FOR_CPU(cpu)    (TIMER10_IRQ_IDX + cpu)
#endif
#define TIMER_BASE_FOR_CPU(cpu) (TIMER_CPU0 + (cpu) * 8)

static u32 usec_config;
static void __iomem *timer_reg_base;
#ifdef CONFIG_ARM
static struct delay_timer tegra_delay_timer;
#endif

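/*
 * The PTV register takes the tick count in its low bits together with the
 * TIMER_PTV_EN enable bit (and TIMER_PTV_PER for periodic mode). As the
 * "n+1 scheme" comments below indicate, the hardware fires n+1 ticks after
 * a value n is programmed, hence the callbacks write cycles - 1 and
 * rate / HZ - 1 respectively.
 */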
static int tegra_timer_set_next_event(unsigned long cycles,
                                      struct clock_event_device *evt)
{
        void __iomem *reg_base = timer_of_base(to_timer_of(evt));

        writel(TIMER_PTV_EN |
               ((cycles > 1) ? (cycles - 1) : 0), /* n+1 scheme */
               reg_base + TIMER_PTV);

        return 0;
}

static int tegra_timer_shutdown(struct clock_event_device *evt)
{
        void __iomem *reg_base = timer_of_base(to_timer_of(evt));

        writel(0, reg_base + TIMER_PTV);

        return 0;
}

static int tegra_timer_set_periodic(struct clock_event_device *evt)
{
        void __iomem *reg_base = timer_of_base(to_timer_of(evt));

        writel(TIMER_PTV_EN | TIMER_PTV_PER |
               ((timer_of_rate(to_timer_of(evt)) / HZ) - 1),
               reg_base + TIMER_PTV);

        return 0;
}

static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
{
        struct clock_event_device *evt = (struct clock_event_device *)dev_id;
        void __iomem *reg_base = timer_of_base(to_timer_of(evt));

        writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
        evt->event_handler(evt);

        return IRQ_HANDLED;
}

static void tegra_timer_suspend(struct clock_event_device *evt)
{
        void __iomem *reg_base = timer_of_base(to_timer_of(evt));

        writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
}

static void tegra_timer_resume(struct clock_event_device *evt)
{
        writel(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
}

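/*
 * When building for 64-bit Tegra (CONFIG_ARM64), each CPU gets its own timer
 * channel starting at TIMER10, so the clockevent is per-CPU and is wired up
 * through the CPU hotplug callbacks below. 32-bit ARM Tegra instead uses a
 * single TIMER3-based clockevent with CLOCK_EVT_FEAT_DYNIRQ, plus the 1MHz
 * TIMERUS counter for sched_clock, the delay timer and the "timer_us"
 * clocksource.
 */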
#ifdef CONFIG_ARM64
static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
        .flags = TIMER_OF_CLOCK | TIMER_OF_BASE,

        .clkevt = {
                .name = "tegra_timer",
                .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
                .set_next_event = tegra_timer_set_next_event,
                .set_state_shutdown = tegra_timer_shutdown,
                .set_state_periodic = tegra_timer_set_periodic,
                .set_state_oneshot = tegra_timer_shutdown,
                .tick_resume = tegra_timer_shutdown,
                .suspend = tegra_timer_suspend,
                .resume = tegra_timer_resume,
        },
};

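/*
 * CPU hotplug callbacks: tegra_timer_setup() runs on the CPU being brought
 * up (it is bound to CPUHP_AP_TEGRA_TIMER_STARTING below) and pins the
 * per-CPU timer interrupt to that CPU before registering the clockevent;
 * tegra_timer_stop() undoes this when the CPU goes down.
 */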
static int tegra_timer_setup(unsigned int cpu)
{
        struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);

        irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
        enable_irq(to->clkevt.irq);

        clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
                                        1, /* min */
                                        0x1fffffff); /* 29 bits */

        return 0;
}

static int tegra_timer_stop(unsigned int cpu)
{
        struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);

        to->clkevt.set_state_shutdown(&to->clkevt);
        disable_irq_nosync(to->clkevt.irq);

        return 0;
}
#else /* CONFIG_ARM */
static struct timer_of tegra_to = {
        .flags = TIMER_OF_CLOCK | TIMER_OF_BASE | TIMER_OF_IRQ,

        .clkevt = {
                .name = "tegra_timer",
                .features = CLOCK_EVT_FEAT_ONESHOT |
                            CLOCK_EVT_FEAT_PERIODIC |
                            CLOCK_EVT_FEAT_DYNIRQ,
                .set_next_event = tegra_timer_set_next_event,
                .set_state_shutdown = tegra_timer_shutdown,
                .set_state_periodic = tegra_timer_set_periodic,
                .set_state_oneshot = tegra_timer_shutdown,
                .tick_resume = tegra_timer_shutdown,
                .suspend = tegra_timer_suspend,
                .resume = tegra_timer_resume,
                .cpumask = cpu_possible_mask,
        },

        .of_irq = {
                .index = 2, /* TIMER3 */
                .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH,
                .handler = tegra_timer_isr,
        },
};

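/*
 * The free-running TIMERUS counter ticks at 1MHz (see
 * tegra_timer_common_init()); on 32-bit ARM it backs sched_clock, the
 * udelay() timer and the "timer_us" clocksource registered below.
 */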
static u64 notrace tegra_read_sched_clock(void)
{
        return readl(timer_reg_base + TIMERUS_CNTR_1US);
}

static unsigned long tegra_delay_timer_read_counter_long(void)
{
        return readl(timer_reg_base + TIMERUS_CNTR_1US);
}

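/*
 * The Tegra RTC keeps counting while the SoC is suspended, so it is exposed
 * as a millisecond-resolution, CLOCK_SOURCE_SUSPEND_NONSTOP clocksource that
 * the timekeeping core can use to account for time spent in suspend.
 */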
static struct timer_of suspend_rtc_to = {
        .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
};

/*
 * tegra_rtc_read_ms - Reads the Tegra RTC registers
 * Care must be taken that this function is not called while the
 * tegra_rtc driver could be executing, to avoid race conditions
 * on the RTC shadow register.
 */
static u64 tegra_rtc_read_ms(struct clocksource *cs)
{
        u32 ms = readl(timer_of_base(&suspend_rtc_to) + RTC_MILLISECONDS);
        u32 s = readl(timer_of_base(&suspend_rtc_to) + RTC_SHADOW_SECONDS);

        return (u64)s * MSEC_PER_SEC + ms;
}

static struct clocksource suspend_rtc_clocksource = {
        .name = "tegra_suspend_timer",
        .read = tegra_rtc_read_ms,
        .mask = CLOCKSOURCE_MASK(32),
        .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
#endif

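/*
 * The per-rate comments in the switch below spell out how TIMERUS_USEC_CFG
 * is derived: for a 19.2MHz parent clock, 0x045f encodes the pair (4, 95)
 * and (95 + 1) / (4 + 1) = 96 / 5 = 19.2, the ratio needed to bring the
 * parent clock down to the 1MHz microsecond counter.
 */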
static int tegra_timer_common_init(struct device_node *np, struct timer_of *to)
{
        int ret = 0;

        ret = timer_of_init(np, to);
        if (ret < 0)
                goto out;

        timer_reg_base = timer_of_base(to);

        /*
         * Configure microsecond timers to have 1MHz clock
         * Config register is 0xqqww, where qq is "dividend", ww is "divisor"
         * Uses n+1 scheme
         */
        switch (timer_of_rate(to)) {
        case 12000000:
                usec_config = 0x000b; /* (11+1)/(0+1) */
                break;
        case 12800000:
                usec_config = 0x043f; /* (63+1)/(4+1) */
                break;
        case 13000000:
                usec_config = 0x000c; /* (12+1)/(0+1) */
                break;
        case 16800000:
                usec_config = 0x0453; /* (83+1)/(4+1) */
                break;
        case 19200000:
                usec_config = 0x045f; /* (95+1)/(4+1) */
                break;
        case 26000000:
                usec_config = 0x0019; /* (25+1)/(0+1) */
                break;
        case 38400000:
                usec_config = 0x04bf; /* (191+1)/(4+1) */
                break;
        case 48000000:
                usec_config = 0x002f; /* (47+1)/(0+1) */
                break;
        default:
                ret = -EINVAL;
                goto out;
        }

        writel(usec_config, timer_of_base(to) + TIMERUS_USEC_CFG);

out:
        return ret;
}

#ifdef CONFIG_ARM64
static int __init tegra_init_timer(struct device_node *np)
{
        int cpu, ret;
        struct timer_of *to;

        to = this_cpu_ptr(&tegra_to);
        ret = tegra_timer_common_init(np, to);
        if (ret)
                goto out;

        for_each_possible_cpu(cpu) {
                struct timer_of *cpu_to;

                cpu_to = per_cpu_ptr(&tegra_to, cpu);
                cpu_to->of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(cpu);
                cpu_to->of_clk.rate = timer_of_rate(to);
                cpu_to->clkevt.cpumask = cpumask_of(cpu);
                cpu_to->clkevt.irq =
                        irq_of_parse_and_map(np, IRQ_IDX_FOR_CPU(cpu));
                if (!cpu_to->clkevt.irq) {
                        pr_err("%s: can't map IRQ for CPU%d\n",
                               __func__, cpu);
                        ret = -EINVAL;
                        goto out_irq;
                }

                irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
                ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr,
                                  IRQF_TIMER | IRQF_NOBALANCING,
                                  cpu_to->clkevt.name, &cpu_to->clkevt);
                if (ret) {
                        pr_err("%s: cannot setup irq %d for CPU%d\n",
                               __func__, cpu_to->clkevt.irq, cpu);
                        goto out_irq;
                }
        }

        cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
                          "AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
                          tegra_timer_stop);

        return ret;

out_irq:
        for_each_possible_cpu(cpu) {
                struct timer_of *cpu_to;

                cpu_to = per_cpu_ptr(&tegra_to, cpu);
                if (cpu_to->clkevt.irq) {
                        free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
                        irq_dispose_mapping(cpu_to->clkevt.irq);
                }
        }
out:
        timer_of_cleanup(to);

        return ret;
}
#else /* CONFIG_ARM */
static int __init tegra_init_timer(struct device_node *np)
{
        int ret;

        ret = tegra_timer_common_init(np, &tegra_to);
        if (ret)
                goto out;

        tegra_to.of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(0);
        tegra_to.of_clk.rate = 1000000; /* microsecond timer */

        sched_clock_register(tegra_read_sched_clock, 32,
                             timer_of_rate(&tegra_to));
        ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
                                    "timer_us", timer_of_rate(&tegra_to),
                                    300, 32, clocksource_mmio_readl_up);
        if (ret) {
                pr_err("Failed to register clocksource\n");
                goto out;
        }

        tegra_delay_timer.read_current_timer =
                        tegra_delay_timer_read_counter_long;
        tegra_delay_timer.freq = timer_of_rate(&tegra_to);
        register_current_timer_delay(&tegra_delay_timer);

        clockevents_config_and_register(&tegra_to.clkevt,
                                        timer_of_rate(&tegra_to),
                                        1, /* min */
                                        0x1fffffff); /* 29 bits */

        return ret;

out:
        timer_of_cleanup(&tegra_to);

        return ret;
}

static int __init tegra20_init_rtc(struct device_node *np)
{
        int ret;

        ret = timer_of_init(np, &suspend_rtc_to);
        if (ret)
                return ret;

        clocksource_register_hz(&suspend_rtc_clocksource, 1000);

        return 0;
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
#endif

TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra_init_timer);
TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra_init_timer);