/*
 * Copyright (C) 2016 Marvell
 *
 * Thomas Petazzoni <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#define pr_fmt(fmt) "GIC-ODMI: " fmt

#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>

#define GICP_ODMIN_SET			0x40
#define GICP_ODMI_INT_NUM_SHIFT		12
#define GICP_ODMIN_GM_EP_R0		0x110
#define GICP_ODMIN_GM_EP_R1		0x114
#define GICP_ODMIN_GM_EA_R0		0x108
#define GICP_ODMIN_GM_EA_R1		0x118

/*
 * We don't support the group events, so we simply have 8 interrupts
 * per frame.
 */
#define NODMIS_SHIFT		3
#define NODMIS_PER_FRAME	(1 << NODMIS_SHIFT)
#define NODMIS_MASK		(NODMIS_PER_FRAME - 1)

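/*
 * Per-frame state: the frame's MMIO resource and mapping, plus the
 * first GIC SPI that the frame's interrupts are wired to.
 */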
struct odmi_data {
        struct resource res;
        void __iomem *base;
        unsigned int spi_base;
};

static struct odmi_data *odmis;
static unsigned long *odmis_bm;
static unsigned int odmis_count;

/* Protects odmis_bm */
static DEFINE_SPINLOCK(odmis_bm_lock);

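/*
 * The MSI doorbell is the ODMIN_SET register of the frame owning the
 * interrupt; the ODMI number within the frame is encoded in the data
 * payload at GICP_ODMI_INT_NUM_SHIFT.
 */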
static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
        struct odmi_data *odmi;
        phys_addr_t addr;
        unsigned int odmin;

        if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME))
                return;

        odmi = &odmis[d->hwirq >> NODMIS_SHIFT];
        odmin = d->hwirq & NODMIS_MASK;

        addr = odmi->res.start + GICP_ODMIN_SET;

        msg->address_hi = upper_32_bits(addr);
        msg->address_lo = lower_32_bits(addr);
        msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT;
}

static struct irq_chip odmi_irq_chip = {
        .name                   = "ODMI",
        .irq_mask               = irq_chip_mask_parent,
        .irq_unmask             = irq_chip_unmask_parent,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
        .irq_compose_msi_msg    = odmi_compose_msi_msg,
};

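/*
 * Allocate a free ODMI from the global bitmap and wire it up to the
 * corresponding SPI of the parent GIC domain. ODMI interrupts are edge
 * triggered, so the parent line is forced to edge-rising.
 */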
static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *args)
{
        struct odmi_data *odmi = NULL;
        struct irq_fwspec fwspec;
        struct irq_data *d;
        unsigned int hwirq, odmin;
        int ret;

        spin_lock(&odmis_bm_lock);
        hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count);
        if (hwirq >= NODMIS_PER_FRAME * odmis_count) {
                spin_unlock(&odmis_bm_lock);
                return -ENOSPC;
        }

        __set_bit(hwirq, odmis_bm);
        spin_unlock(&odmis_bm_lock);

        odmi = &odmis[hwirq >> NODMIS_SHIFT];
        odmin = hwirq & NODMIS_MASK;

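        /*
         * "marvell,spi-base" holds the raw SPI number; the GIC binding
         * expects SPIs relative to the start of the SPI range, hence
         * the subtraction of 32.
         */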
        fwspec.fwnode = domain->parent->fwnode;
        fwspec.param_count = 3;
        fwspec.param[0] = GIC_SPI;
        fwspec.param[1] = odmi->spi_base - 32 + odmin;
        fwspec.param[2] = IRQ_TYPE_EDGE_RISING;

        ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
        if (ret) {
                pr_err("Cannot allocate parent IRQ\n");
                spin_lock(&odmis_bm_lock);
                __clear_bit(hwirq, odmis_bm);
                spin_unlock(&odmis_bm_lock);
                return ret;
        }

        /* Configure the interrupt line to be edge */
        d = irq_domain_get_irq_data(domain->parent, virq);
        d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);

        irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
                                      &odmi_irq_chip, NULL);

        return 0;
}

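/* Release the parent SPI and return the ODMI to the bitmap. */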
static void odmi_irq_domain_free(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);

        if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) {
                pr_err("Failed to teardown msi. Invalid hwirq %lu\n", d->hwirq);
                return;
        }

        irq_domain_free_irqs_parent(domain, virq, nr_irqs);

        /* Actually free the MSI */
        spin_lock(&odmis_bm_lock);
        __clear_bit(d->hwirq, odmis_bm);
        spin_unlock(&odmis_bm_lock);
}

static const struct irq_domain_ops odmi_domain_ops = {
        .alloc  = odmi_irq_domain_alloc,
        .free   = odmi_irq_domain_free,
};

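/*
 * The generic MSI layer supplies the default chip and domain ops; only
 * the chip name needs to be provided here.
 */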
static struct irq_chip odmi_msi_irq_chip = {
        .name   = "ODMI",
};

static struct msi_domain_ops odmi_msi_ops = {
};

static struct msi_domain_info odmi_msi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
        .ops    = &odmi_msi_ops,
        .chip   = &odmi_msi_irq_chip,
};

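/*
 * Probe the ODMI frames described by the "marvell,odmi-frames" and
 * "marvell,spi-base" properties, then stack a platform MSI domain on
 * top of a linear domain covering all ODMIs.
 */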
static int __init mvebu_odmi_init(struct device_node *node,
                                  struct device_node *parent)
{
        struct irq_domain *inner_domain, *plat_domain;
        int ret, i;

        if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
                return -EINVAL;

        odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL);
        if (!odmis)
                return -ENOMEM;

        odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME),
                           sizeof(long), GFP_KERNEL);
        if (!odmis_bm) {
                ret = -ENOMEM;
                goto err_alloc;
        }

        for (i = 0; i < odmis_count; i++) {
                struct odmi_data *odmi = &odmis[i];

                ret = of_address_to_resource(node, i, &odmi->res);
                if (ret)
                        goto err_unmap;

                odmi->base = of_io_request_and_map(node, i, "odmi");
                if (IS_ERR(odmi->base)) {
                        ret = PTR_ERR(odmi->base);
                        goto err_unmap;
                }

                if (of_property_read_u32_index(node, "marvell,spi-base",
                                               i, &odmi->spi_base)) {
                        ret = -EINVAL;
                        goto err_unmap;
                }
        }

        inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
                                                odmis_count * NODMIS_PER_FRAME,
                                                &odmi_domain_ops, NULL);
        if (!inner_domain) {
                ret = -ENOMEM;
                goto err_unmap;
        }

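        /* Stack this domain on top of the parent GIC's IRQ domain. */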
        inner_domain->parent = irq_find_host(parent);

        plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
                                                     &odmi_msi_domain_info,
                                                     inner_domain);
        if (!plat_domain) {
                ret = -ENOMEM;
                goto err_remove_inner;
        }

        return 0;

err_remove_inner:
        irq_domain_remove(inner_domain);
err_unmap:
        for (i = 0; i < odmis_count; i++) {
                struct odmi_data *odmi = &odmis[i];

                if (odmi->base && !IS_ERR(odmi->base))
                        iounmap(odmis[i].base);
        }
        kfree(odmis_bm);
err_alloc:
        kfree(odmis);
        return ret;
}

IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init);