/*
 * SuperH Timer Support - CMT
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>

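/*
 * Per-channel private data: register mapping, clock, and the bookkeeping
 * needed to drive one CMT channel as a clock event device and/or a
 * clocksource. One instance is allocated per channel platform device.
 */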
struct sh_cmt_priv {
        void __iomem *mapbase;
        struct clk *clk;
        unsigned long width; /* 16 or 32 bit version of hardware block */
        unsigned long overflow_bit;
        unsigned long clear_bits;
        struct irqaction irqaction;
        struct platform_device *pdev;

        unsigned long flags;
        unsigned long match_value;
        unsigned long next_match_value;
        unsigned long max_match_value;
        unsigned long rate;
        spinlock_t lock;
        struct clock_event_device ced;
        struct clocksource cs;
        unsigned long total_cycles;
};

static DEFINE_SPINLOCK(sh_cmt_lock);

#define CMSTR -1 /* shared register */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */

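/*
 * Register access helpers. CMSTR is shared between the channels of a CMT
 * block and sits below the per-channel window (hence the channel_offset
 * correction), while CMCSR/CMCNT/CMCOR are per-channel. On 16-bit blocks
 * registers are spaced two bytes apart and accessed 16 bits wide; on
 * 32-bit blocks they are spaced four bytes apart and CMCNT/CMCOR are
 * accessed 32 bits wide.
 */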
static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        void __iomem *base = p->mapbase;
        unsigned long offs;

        if (reg_nr == CMSTR) {
                offs = 0;
                base -= cfg->channel_offset;
        } else
                offs = reg_nr;

        if (p->width == 16)
                offs <<= 1;
        else {
                offs <<= 2;
                if ((reg_nr == CMCNT) || (reg_nr == CMCOR))
                        return ioread32(base + offs);
        }

        return ioread16(base + offs);
}

static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
                                unsigned long value)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        void __iomem *base = p->mapbase;
        unsigned long offs;

        if (reg_nr == CMSTR) {
                offs = 0;
                base -= cfg->channel_offset;
        } else
                offs = reg_nr;

        if (p->width == 16)
                offs <<= 1;
        else {
                offs <<= 2;
                if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) {
                        iowrite32(value, base + offs);
                        return;
                }
        }

        iowrite16(value, base + offs);
}

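/*
 * Read the free-running counter, re-reading until the sampled value is
 * stable, and report through *has_wrapped whether the channel's overflow
 * flag (p->overflow_bit in CMCSR) was set while sampling.
 */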
static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
                                        int *has_wrapped)
{
        unsigned long v1, v2, v3;
        int o1, o2;

        o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;

        /* Make sure the timer value is stable. Stolen from acpi_pm.c */
        do {
                o2 = o1;
                v1 = sh_cmt_read(p, CMCNT);
                v2 = sh_cmt_read(p, CMCNT);
                v3 = sh_cmt_read(p, CMCNT);
                o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
        } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
                          || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

        *has_wrapped = o1;
        return v2;
}

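/*
 * Start or stop a channel. The CMSTR start/stop register is shared by all
 * channels of a CMT block, so updates are serialized with the global
 * sh_cmt_lock rather than the per-channel lock.
 */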
static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        unsigned long flags, value;

        /* start stop register shared by multiple timer channels */
        spin_lock_irqsave(&sh_cmt_lock, flags);
        value = sh_cmt_read(p, CMSTR);

        if (start)
                value |= 1 << cfg->timer_bit;
        else
                value &= ~(1 << cfg->timer_bit);

        sh_cmt_write(p, CMSTR, value);
        spin_unlock_irqrestore(&sh_cmt_lock, flags);
}

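/*
 * Enable the channel clock, program free-running operation with the
 * maximum timeout, and start counting. The effective counting rate (input
 * clock divided by the selected prescaler) is returned through *rate.
 */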
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        int ret;

        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
                pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
                return ret;
        }

        /* make sure channel is disabled */
        sh_cmt_start_stop_ch(p, 0);

        /* configure channel, periodic mode and maximum timeout */
        if (p->width == 16) {
                *rate = clk_get_rate(p->clk) / 512;
                sh_cmt_write(p, CMCSR, 0x43);
        } else {
                *rate = clk_get_rate(p->clk) / 8;
                sh_cmt_write(p, CMCSR, 0x01a4);
        }

        sh_cmt_write(p, CMCOR, 0xffffffff);
        sh_cmt_write(p, CMCNT, 0);

        /* enable channel */
        sh_cmt_start_stop_ch(p, 1);
        return 0;
}

static void sh_cmt_disable(struct sh_cmt_priv *p)
{
        /* disable channel */
        sh_cmt_start_stop_ch(p, 0);

        /* stop clock */
        clk_disable(p->clk);
}

/* private flags */
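/*
 * FLAG_CLOCKEVENT and FLAG_CLOCKSOURCE record which frameworks currently
 * use the channel. FLAG_REPROGRAM asks for the match value to be
 * reprogrammed, FLAG_SKIPEVENT makes the interrupt handler skip delivering
 * one event when reprogramming raced with a compare match, and
 * FLAG_IRQCONTEXT is set while the interrupt handler runs so that
 * sh_cmt_clock_event_next() only records the new delta and leaves the
 * actual reprogramming to the handler.
 */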
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)

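/*
 * Program a new match value, re-checking the counter afterwards so that a
 * match firing while we reprogram, or an event programmed too close to the
 * current counter value, is never lost; retries with an increasing delay
 * until the hardware state is consistent.
 */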
static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
                                              int absolute)
{
        unsigned long new_match;
        unsigned long value = p->next_match_value;
        unsigned long delay = 0;
        unsigned long now = 0;
        int has_wrapped;

        now = sh_cmt_get_counter(p, &has_wrapped);
        p->flags |= FLAG_REPROGRAM; /* force reprogram */

        if (has_wrapped) {
                /* we're competing with the interrupt handler.
                 * -> let the interrupt handler reprogram the timer.
                 * -> interrupt number two handles the event.
                 */
                p->flags |= FLAG_SKIPEVENT;
                return;
        }

        if (absolute)
                now = 0;

        do {
                /* reprogram the timer hardware,
                 * but don't save the new match value yet.
                 */
                new_match = now + value + delay;
                if (new_match > p->max_match_value)
                        new_match = p->max_match_value;

                sh_cmt_write(p, CMCOR, new_match);

                now = sh_cmt_get_counter(p, &has_wrapped);
                if (has_wrapped && (new_match > p->match_value)) {
                        /* we are changing to a greater match value,
                         * so this wrap must be caused by the counter
                         * matching the old value.
                         * -> first interrupt reprograms the timer.
                         * -> interrupt number two handles the event.
                         */
                        p->flags |= FLAG_SKIPEVENT;
                        break;
                }

                if (has_wrapped) {
                        /* we are changing to a smaller match value,
                         * so the wrap must be caused by the counter
                         * matching the new value.
                         * -> save programmed match value.
                         * -> let isr handle the event.
                         */
                        p->match_value = new_match;
                        break;
                }

                /* be safe: verify hardware settings */
                if (now < new_match) {
                        /* timer value is below match value, all good.
                         * this makes sure we won't miss any match events.
                         * -> save programmed match value.
                         * -> let isr handle the event.
                         */
                        p->match_value = new_match;
                        break;
                }

                /* the counter has reached a value greater
                 * than our new match value. and since the
                 * has_wrapped flag isn't set we must have
                 * programmed a too close event.
                 * -> increase delay and retry.
                 */
                if (delay)
                        delay <<= 1;
                else
                        delay = 1;

                if (!delay)
                        pr_warning("sh_cmt: too long delay\n");

        } while (delay);
}

static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
        unsigned long flags;

        if (delta > p->max_match_value)
                pr_warning("sh_cmt: delta out of range\n");

        spin_lock_irqsave(&p->lock, flags);
        p->next_match_value = delta;
        sh_cmt_clock_event_program_verify(p, 0);
        spin_unlock_irqrestore(&p->lock, flags);
}

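/*
 * Compare match interrupt: acknowledge the hardware flags, account elapsed
 * cycles for the clocksource, deliver the clock event and, if requested,
 * reprogram the next match value.
 */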
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
        struct sh_cmt_priv *p = dev_id;

        /* clear flags */
        sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits);

        /* update clock source counter to begin with if enabled
         * the wrap flag should be cleared by the timer specific
         * isr before we end up here.
         */
        if (p->flags & FLAG_CLOCKSOURCE)
                p->total_cycles += p->match_value;

        if (!(p->flags & FLAG_REPROGRAM))
                p->next_match_value = p->max_match_value;

        p->flags |= FLAG_IRQCONTEXT;

        if (p->flags & FLAG_CLOCKEVENT) {
                if (!(p->flags & FLAG_SKIPEVENT)) {
                        if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
                                p->next_match_value = p->max_match_value;
                                p->flags |= FLAG_REPROGRAM;
                        }

                        p->ced.event_handler(&p->ced);
                }
        }

        p->flags &= ~FLAG_SKIPEVENT;

        if (p->flags & FLAG_REPROGRAM) {
                p->flags &= ~FLAG_REPROGRAM;
                sh_cmt_clock_event_program_verify(p, 1);

                if (p->flags & FLAG_CLOCKEVENT)
                        if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
                            || (p->match_value == p->next_match_value))
                                p->flags &= ~FLAG_REPROGRAM;
        }

        p->flags &= ~FLAG_IRQCONTEXT;

        return IRQ_HANDLED;
}

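/*
 * sh_cmt_start()/sh_cmt_stop() arbitrate the channel between the clock
 * event and clocksource users: the hardware is enabled when the first user
 * arrives and disabled again once both FLAG_CLOCKEVENT and FLAG_CLOCKSOURCE
 * have been dropped.
 */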
static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);

        if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                ret = sh_cmt_enable(p, &p->rate);

        if (ret)
                goto out;
        p->flags |= flag;

        /* setup timeout if no clockevent */
        if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
                sh_cmt_set_next(p, p->max_match_value);
 out:
        spin_unlock_irqrestore(&p->lock, flags);

        return ret;
}

static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
{
        unsigned long flags;
        unsigned long f;

        spin_lock_irqsave(&p->lock, flags);

        f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
        p->flags &= ~flag;

        if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                sh_cmt_disable(p);

        /* adjust the timeout to maximum if only clocksource left */
        if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
                sh_cmt_set_next(p, p->max_match_value);

        spin_unlock_irqrestore(&p->lock, flags);
}

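/*
 * Clocksource interface: the readout combines the cycles accumulated in
 * total_cycles at each compare match interrupt with the current raw
 * counter value, so the returned count keeps increasing across wraps.
 */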
static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
{
        return container_of(cs, struct sh_cmt_priv, cs);
}

static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
        unsigned long flags, raw;
        unsigned long value;
        int has_wrapped;

        spin_lock_irqsave(&p->lock, flags);
        value = p->total_cycles;
        raw = sh_cmt_get_counter(p, &has_wrapped);

        if (unlikely(has_wrapped))
                raw += p->match_value;
        spin_unlock_irqrestore(&p->lock, flags);

        return value + raw;
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
        int ret;

        p->total_cycles = 0;

        ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
        if (ret)
                return ret;

        /* TODO: calculate good shift from rate and counter bit width */
        cs->shift = 0;
        cs->mult = clocksource_hz2mult(p->rate, cs->shift);
        return 0;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
        sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
}

static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
                                       char *name, unsigned long rating)
{
        struct clocksource *cs = &p->cs;

        cs->name = name;
        cs->rating = rating;
        cs->read = sh_cmt_clocksource_read;
        cs->enable = sh_cmt_clocksource_enable;
        cs->disable = sh_cmt_clocksource_disable;
        cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
        pr_info("sh_cmt: %s used as clock source\n", cs->name);
        clocksource_register(cs);
        return 0;
}

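/*
 * Clock event interface: the channel is programmed either with a periodic
 * interval derived from the counting rate or with the one-shot delta
 * requested by the clockevents core.
 */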
static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
{
        return container_of(ced, struct sh_cmt_priv, ced);
}

static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
{
        struct clock_event_device *ced = &p->ced;

        sh_cmt_start(p, FLAG_CLOCKEVENT);

        /* TODO: calculate good shift from rate and counter bit width */

        ced->shift = 32;
        ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
        ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
        ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);

        if (periodic)
                sh_cmt_set_next(p, (p->rate + HZ/2) / HZ);
        else
                sh_cmt_set_next(p, p->max_match_value);
}

static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
                                    struct clock_event_device *ced)
{
        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

        /* deal with old setting first */
        switch (ced->mode) {
        case CLOCK_EVT_MODE_PERIODIC:
        case CLOCK_EVT_MODE_ONESHOT:
                sh_cmt_stop(p, FLAG_CLOCKEVENT);
                break;
        default:
                break;
        }

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                pr_info("sh_cmt: %s used for periodic clock events\n",
                        ced->name);
                sh_cmt_clock_event_start(p, 1);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                pr_info("sh_cmt: %s used for oneshot clock events\n",
                        ced->name);
                sh_cmt_clock_event_start(p, 0);
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_UNUSED:
                sh_cmt_stop(p, FLAG_CLOCKEVENT);
                break;
        default:
                break;
        }
}

static int sh_cmt_clock_event_next(unsigned long delta,
                                   struct clock_event_device *ced)
{
        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

        BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
        if (likely(p->flags & FLAG_IRQCONTEXT))
                p->next_match_value = delta;
        else
                sh_cmt_set_next(p, delta);

        return 0;
}

static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
                                       char *name, unsigned long rating)
{
        struct clock_event_device *ced = &p->ced;

        memset(ced, 0, sizeof(*ced));

        ced->name = name;
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = rating;
        ced->cpumask = cpumask_of(0);
        ced->set_next_event = sh_cmt_clock_event_next;
        ced->set_mode = sh_cmt_clock_event_mode;

        pr_info("sh_cmt: %s used for clock events\n", ced->name);
        clockevents_register_device(ced);
}

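/*
 * Register the channel with the clockevents and/or clocksource frameworks;
 * a rating of zero means the corresponding interface is not used for this
 * channel.
 */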
static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
                           unsigned long clockevent_rating,
                           unsigned long clocksource_rating)
{
        if (p->width == (sizeof(p->max_match_value) * 8))
                p->max_match_value = ~0;
        else
                p->max_match_value = (1 << p->width) - 1;

        p->match_value = p->max_match_value;
        spin_lock_init(&p->lock);

        if (clockevent_rating)
                sh_cmt_register_clockevent(p, name, clockevent_rating);

        if (clocksource_rating)
                sh_cmt_register_clocksource(p, name, clocksource_rating);

        return 0;
}

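/*
 * Per-device setup: map the channel registers, install the interrupt
 * handler with setup_irq() (this may run as an early platform device,
 * before request_irq() is usable), look up the clock and detect the
 * counter width from the size of the memory resource.
 */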
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
        struct sh_timer_config *cfg = pdev->dev.platform_data;
        struct resource *res;
        int irq, ret;
        ret = -ENXIO;

        memset(p, 0, sizeof(*p));
        p->pdev = pdev;

        if (!cfg) {
                dev_err(&p->pdev->dev, "missing platform data\n");
                goto err0;
        }

        platform_set_drvdata(pdev, p);

        res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&p->pdev->dev, "failed to get I/O memory\n");
                goto err0;
        }

        irq = platform_get_irq(p->pdev, 0);
        if (irq < 0) {
                dev_err(&p->pdev->dev, "failed to get irq\n");
                goto err0;
        }

        /* map memory, let mapbase point to our channel */
        p->mapbase = ioremap_nocache(res->start, resource_size(res));
        if (p->mapbase == NULL) {
                pr_err("sh_cmt: failed to remap I/O memory\n");
                goto err0;
        }

        /* request irq using setup_irq() (too early for request_irq()) */
        p->irqaction.name = cfg->name;
        p->irqaction.handler = sh_cmt_interrupt;
        p->irqaction.dev_id = p;
        p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
        p->irqaction.mask = CPU_MASK_NONE;
        ret = setup_irq(irq, &p->irqaction);
        if (ret) {
                pr_err("sh_cmt: failed to request irq %d\n", irq);
                goto err1;
        }

        /* get hold of clock */
        p->clk = clk_get(&p->pdev->dev, cfg->clk);
        if (IS_ERR(p->clk)) {
                pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
                ret = PTR_ERR(p->clk);
                goto err2;
        }

        if (resource_size(res) == 6) {
                p->width = 16;
                p->overflow_bit = 0x80;
                p->clear_bits = ~0x80;
        } else {
                p->width = 32;
                p->overflow_bit = 0x8000;
                p->clear_bits = ~0xc000;
        }

        return sh_cmt_register(p, cfg->name,
                               cfg->clockevent_rating,
                               cfg->clocksource_rating);
 err2:
        remove_irq(irq, &p->irqaction);
 err1:
        iounmap(p->mapbase);
 err0:
        return ret;
}

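/*
 * Probe: if the device was already brought up through the earlytimer path
 * the existing state is kept; otherwise driver data is allocated and the
 * channel is set up from scratch.
 */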
static int __devinit sh_cmt_probe(struct platform_device *pdev)
{
        struct sh_cmt_priv *p = platform_get_drvdata(pdev);
        struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;

        if (p) {
                pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
                return 0;
        }

        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL) {
                dev_err(&pdev->dev, "failed to allocate driver data\n");
                return -ENOMEM;
        }

        ret = sh_cmt_setup(p, pdev);
        if (ret) {
                kfree(p);
                platform_set_drvdata(pdev, NULL);
        }
        return ret;
}

static int __devexit sh_cmt_remove(struct platform_device *pdev)
{
        return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_cmt_device_driver = {
        .probe = sh_cmt_probe,
        .remove = __devexit_p(sh_cmt_remove),
        .driver = {
                .name = "sh_cmt",
        }
};

static int __init sh_cmt_init(void)
{
        return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
        platform_driver_unregister(&sh_cmt_device_driver);
}

early_platform_init("earlytimer", &sh_cmt_device_driver);
module_init(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");