/*
 * Marvell Dove PMU support
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/soc/dove/pmu.h>
#include <linux/spinlock.h>

#define NR_PMU_IRQS		7

#define PMC_SW_RST		0x30
#define PMC_IRQ_CAUSE		0x50
#define PMC_IRQ_MASK		0x54

#define PMU_PWR			0x10
#define PMU_ISO			0x58

struct pmu_data {
	spinlock_t lock;
	struct device_node *of_node;
	void __iomem *pmc_base;
	void __iomem *pmu_base;
	struct irq_chip_generic *irq_gc;
	struct irq_domain *irq_domain;
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev reset;
#endif
};

/*
 * The PMU contains a register to reset various subsystems within the
 * SoC.  Export this as a reset controller.
 */
#ifdef CONFIG_RESET_CONTROLLER
#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)

static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&pmu->lock, flags);
	val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = ~BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static struct reset_control_ops pmu_reset_ops = {
	.reset = pmu_reset_reset,
	.assert = pmu_reset_assert,
	.deassert = pmu_reset_deassert,
};

static struct reset_controller_dev pmu_reset __initdata = {
	.ops = &pmu_reset_ops,
	.owner = THIS_MODULE,
	.nr_resets = 32,
};

static void __init pmu_reset_init(struct pmu_data *pmu)
{
	int ret;

	pmu->reset = pmu_reset;
	pmu->reset.of_node = pmu->of_node;

	ret = reset_controller_register(&pmu->reset);
	if (ret)
		pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
}
#else
static void __init pmu_reset_init(struct pmu_data *pmu)
{
}
#endif
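
/*
 * Illustrative sketch (not part of this driver): a peripheral driver that
 * references this controller with "resets = <&pmu N>;" in its DT node
 * could pulse its reset line through the generic reset API, which would
 * end up in pmu_reset_reset() above.  The pdev name below is hypothetical.
 *
 *	struct reset_control *rst;
 *
 *	rst = devm_reset_control_get(&pdev->dev, NULL);
 *	if (IS_ERR(rst))
 *		return PTR_ERR(rst);
 *	reset_control_reset(rst);
 */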

struct pmu_domain {
	struct pmu_data *pmu;
	u32 pwr_mask;
	u32 rst_mask;
	u32 iso_mask;
	struct generic_pm_domain base;
};

#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)

/*
 * This deals with the "old" Marvell sequence of bringing a power domain
 * down/up, which is: apply power, release reset, disable isolators.
 *
 * Later devices apparently use a different sequence: power up, disable
 * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
 * enable module clock, deassert reset.
 *
 * Note: reading the assembly, it seems that the IO accessors have an
 * unfortunate side-effect - they cause memory already read into registers
 * for the if () to be re-read for the bit-set or bit-clear operation.
 * The code is written to avoid this.
 */
static int pmu_domain_power_off(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Enable isolators */
	if (pmu_dom->iso_mask) {
		val = ~pmu_dom->iso_mask;
		val &= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	/* Reset unit */
	if (pmu_dom->rst_mask) {
		val = ~pmu_dom->rst_mask;
		val &= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Power down */
	val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
	writel_relaxed(val, pmu_base + PMU_PWR);

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_domain_power_on(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Power on */
	val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
	writel_relaxed(val, pmu_base + PMU_PWR);

	/* Release reset */
	if (pmu_dom->rst_mask) {
		val = pmu_dom->rst_mask;
		val |= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Disable isolators */
	if (pmu_dom->iso_mask) {
		val = pmu_dom->iso_mask;
		val |= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static void __pmu_domain_register(struct pmu_domain *domain,
	struct device_node *np)
{
	unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);

	domain->base.power_off = pmu_domain_power_off;
	domain->base.power_on = pmu_domain_power_on;

	pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));

	if (np)
		of_genpd_add_provider_simple(np, &domain->base);
}
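
/*
 * Hedged example (assumption, not taken from this file) of how a domain
 * registered above is consumed: a device sitting in, say, the VPU domain
 * references the provider from its DT node, and the genpd core then calls
 * pmu_domain_power_on()/pmu_domain_power_off() as devices in the domain
 * runtime-resume and runtime-suspend.  The node name is hypothetical.
 *
 *	vpu: video-decoder@400000 {
 *		...
 *		power-domains = <&vpu_domain>;
 *	};
 */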

/* PMU IRQ controller */
static void pmu_irq_handler(struct irq_desc *desc)
{
	struct pmu_data *pmu = irq_desc_get_handler_data(desc);
	struct irq_chip_generic *gc = pmu->irq_gc;
	struct irq_domain *domain = pmu->irq_domain;
	void __iomem *base = gc->reg_base;
	u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
	u32 done = ~0;

	if (stat == 0) {
		handle_bad_irq(desc);
		return;
	}

	while (stat) {
		u32 hwirq = fls(stat) - 1;

		stat &= ~(1 << hwirq);
		done &= ~(1 << hwirq);

		generic_handle_irq(irq_find_mapping(domain, hwirq));
	}

	/*
	 * The PMU mask register is not RW0C: it is RW.  This means that
	 * the bits take whatever value is written to them; if you write
	 * a '1', you will set the interrupt.
	 *
	 * Unfortunately this means there is NO race free way to clear
	 * these interrupts.
	 *
	 * So, let's structure the code so that the window is as small as
	 * possible.
	 */
	irq_gc_lock(gc);
	done &= readl_relaxed(base + PMC_IRQ_CAUSE);
	writel_relaxed(done, base + PMC_IRQ_CAUSE);
	irq_gc_unlock(gc);
}

static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
{
	const char *name = "pmu_irq";
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int ret;

	/* mask and clear all interrupts */
	writel(0, pmu->pmc_base + PMC_IRQ_MASK);
	writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);

	domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", name);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
					     handle_level_irq,
					     IRQ_NOREQUEST | IRQ_NOPROBE, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
		irq_domain_remove(domain);
		return ret;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = pmu->pmc_base;
	gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	pmu->irq_domain = domain;
	pmu->irq_gc = gc;

	irq_set_handler_data(irq, pmu);
	irq_set_chained_handler(irq, pmu_irq_handler);

	return 0;
}
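
/*
 * Hedged sketch (assumption, not in this file): with the PMU registered
 * as an interrupt-controller, a child device would point at it and use
 * one of the NR_PMU_IRQS lines in the usual way, e.g.
 *
 *	some-device {
 *		interrupt-parent = <&pmu>;
 *		interrupts = <5>;
 *	};
 *
 * The device's driver then simply requests the virq it is handed (via
 * platform_get_irq()/request_irq()); the mapping back to the hwirq is
 * resolved through pmu->irq_domain by pmu_irq_handler() above.
 */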

int __init dove_init_pmu_legacy(const struct dove_pmu_initdata *initdata)
{
	const struct dove_pmu_domain_initdata *domain_initdata;
	struct pmu_data *pmu;
	int ret;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	spin_lock_init(&pmu->lock);
	pmu->pmc_base = initdata->pmc_base;
	pmu->pmu_base = initdata->pmu_base;

	pmu_reset_init(pmu);
	for (domain_initdata = initdata->domains; domain_initdata->name;
	     domain_initdata++) {
		struct pmu_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (domain) {
			domain->pmu = pmu;
			domain->pwr_mask = domain_initdata->pwr_mask;
			domain->rst_mask = domain_initdata->rst_mask;
			domain->iso_mask = domain_initdata->iso_mask;
			domain->base.name = domain_initdata->name;

			__pmu_domain_register(domain, NULL);
		}
	}

	ret = dove_init_pmu_irq(pmu, initdata->irq);
	if (ret)
		pr_err("dove_init_pmu_irq() failed: %d\n", ret);

	if (pmu->irq_domain)
		irq_domain_associate_many(pmu->irq_domain,
					  initdata->irq_domain_start,
					  0, NR_PMU_IRQS);

	return 0;
}
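
/*
 * Hedged sketch of legacy (non-DT) board code using the entry point above.
 * The field names follow their use in this file; the mask values, base
 * address and IRQ macros are illustrative placeholders only.
 *
 *	static const struct dove_pmu_domain_initdata pmu_domains[] = {
 *		{
 *			.pwr_mask = BIT(3),
 *			.rst_mask = BIT(16),
 *			.iso_mask = BIT(0),
 *			.name = "vpu-domain",
 *		},
 *		{ },		// sentinel: NULL .name ends the loop
 *	};
 *
 *	static const struct dove_pmu_initdata pmu_initdata = {
 *		.pmc_base = PMC_BASE_VIRT,
 *		.pmu_base = PMU_BASE_VIRT,
 *		.irq = PMU_PARENT_IRQ,
 *		.irq_domain_start = PMU_IRQ_BASE,
 *		.domains = pmu_domains,
 *	};
 *
 *	dove_init_pmu_legacy(&pmu_initdata);
 */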

/*
 * pmu: power-manager@d0000 {
 *	compatible = "marvell,dove-pmu";
 *	reg = <0xd0000 0x8000>, <0xd8000 0x8000>;
 *	interrupts = <33>;
 *	interrupt-controller;
 *	#reset-cells = <1>;
 *	domains {
 *		vpu_domain: vpu-domain {
 *			#power-domain-cells = <0>;
 *			marvell,pmu_pwr_mask = <0x00000008>;
 *			marvell,pmu_iso_mask = <0x00000001>;
 *			resets = <&pmu 16>;
 *		};
 *		gpu_domain: gpu-domain {
 *			#power-domain-cells = <0>;
 *			marvell,pmu_pwr_mask = <0x00000004>;
 *			marvell,pmu_iso_mask = <0x00000002>;
 *			resets = <&pmu 18>;
 *		};
 *	};
 * };
 */
int __init dove_init_pmu(void)
{
	struct device_node *np_pmu, *domains_node, *np;
	struct pmu_data *pmu;
	int ret, parent_irq;

	/* Lookup the PMU node */
	np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
	if (!np_pmu)
		return 0;

	domains_node = of_get_child_by_name(np_pmu, "domains");
	if (!domains_node) {
		pr_err("%s: failed to find domains sub-node\n", np_pmu->name);
		return 0;
	}

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	spin_lock_init(&pmu->lock);
	pmu->of_node = np_pmu;
	pmu->pmc_base = of_iomap(pmu->of_node, 0);
	pmu->pmu_base = of_iomap(pmu->of_node, 1);
	if (!pmu->pmc_base || !pmu->pmu_base) {
		pr_err("%s: failed to map PMU\n", np_pmu->name);
		iounmap(pmu->pmu_base);
		iounmap(pmu->pmc_base);
		kfree(pmu);
		return -ENOMEM;
	}

	pmu_reset_init(pmu);

	for_each_available_child_of_node(domains_node, np) {
		struct of_phandle_args args;
		struct pmu_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (!domain)
			break;

		domain->pmu = pmu;
		domain->base.name = kstrdup(np->name, GFP_KERNEL);
		if (!domain->base.name) {
			kfree(domain);
			break;
		}

		of_property_read_u32(np, "marvell,pmu_pwr_mask",
				     &domain->pwr_mask);
		of_property_read_u32(np, "marvell,pmu_iso_mask",
				     &domain->iso_mask);

		/*
		 * We parse the reset controller property directly here
		 * to ensure that we can operate when the reset controller
		 * support is not configured into the kernel.
		 */
		ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
						 0, &args);
		if (ret == 0) {
			if (args.np == pmu->of_node)
				domain->rst_mask = BIT(args.args[0]);
			of_node_put(args.np);
		}

		__pmu_domain_register(domain, np);
	}

	/* Loss of the interrupt controller is not a fatal error. */
	parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
	if (!parent_irq) {
		pr_err("%s: no interrupt specified\n", np_pmu->name);
	} else {
		ret = dove_init_pmu_irq(pmu, parent_irq);
		if (ret)
			pr_err("dove_init_pmu_irq() failed: %d\n", ret);
	}

	return 0;
}