#define pr_fmt(fmt) "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
				  irq_hw_number_t hwirq, int node);
static void irq_domain_check_hierarchy(struct irq_domain *domain);

struct irqchip_fwid {
	struct fwnode_handle fwnode;
	char *name;
	void *data;
};

/**
 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
 *                           identifying an irq domain
 * @data: optional user-provided data
 *
 * Allocate a struct device_node, and return a pointer to the embedded
 * fwnode_handle (or NULL on failure).
 */
struct fwnode_handle *irq_domain_alloc_fwnode(void *data)
{
	struct irqchip_fwid *fwid;
	char *name;

	fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
	name = kasprintf(GFP_KERNEL, "irqchip@%p", data);

	if (!fwid || !name) {
		kfree(fwid);
		kfree(name);
		return NULL;
	}

	fwid->name = name;
	fwid->data = data;
	fwid->fwnode.type = FWNODE_IRQCHIP;
	return &fwid->fwnode;
}

/**
 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
 *
 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
 */
void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
{
	struct irqchip_fwid *fwid;

	if (WARN_ON(fwnode->type != FWNODE_IRQCHIP))
		return;

	fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
	kfree(fwid->name);
	kfree(fwid);
}
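
/*
 * Usage sketch (editor's addition, hedged): a controller that is not backed
 * by a device-tree node can allocate a synthetic fwnode to identify its
 * domain. "reg_base", "my_domain_ops" and "priv" below are hypothetical
 * names, not part of this file.
 *
 *	struct fwnode_handle *fwnode = irq_domain_alloc_fwnode(reg_base);
 *
 *	if (!fwnode)
 *		return -ENOMEM;
 *	domain = irq_domain_create_linear(fwnode, 32, &my_domain_ops, priv);
 *	if (!domain) {
 *		irq_domain_free_fwnode(fwnode);
 *		return -ENOMEM;
 *	}
 */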

/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @fwnode: optional firmware node of the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct irq_domain *domain;
	struct device_node *of_node;

	of_node = to_of_node(fwnode);

	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	of_node_get(of_node);

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->fwnode = fwnode;
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;
	irq_domain_check_hierarchy(domain);

	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
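
/*
 * Usage sketch (editor's addition): most drivers do not call
 * __irq_domain_add() directly but use one of the wrappers from
 * <linux/irqdomain.h>, e.g. irq_domain_add_linear(). "np", "my_domain_ops"
 * and "priv" are hypothetical.
 *
 *	domain = irq_domain_add_linear(np, 64, &my_domain_ops, priv);
 *	if (!domain)
 *		return -ENOMEM;
 */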

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * calling this function.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	/*
	 * radix_tree_delete() takes care of destroying the root
	 * node when all entries are removed. Shout if there are
	 * any mappings left.
	 */
	WARN_ON(domain->revmap_tree.height);

	list_del(&domain->link);

	/*
	 * If the domain going away is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	of_node_put(irq_domain_get_of_node(domain));
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);

/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain, and optionally if first_irq is positive then also
 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
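
/*
 * Usage sketch (editor's addition): a typical DT-probed interrupt
 * controller registers a simple domain and lets mappings be created on
 * demand. "pdev", "my_domain_ops" and "chip" are hypothetical.
 *
 *	domain = irq_domain_add_simple(pdev->dev.of_node, chip->num_irqs, 0,
 *				       &my_domain_ops, chip);
 *	if (!domain)
 *		return -ENOMEM;
 */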

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
				  first_hwirq + size, 0, ops, host_data);
	if (domain)
		irq_domain_associate_many(domain, first_irq, first_hwirq, size);

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
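
/*
 * Usage sketch (editor's addition): a non-DT board whose 16 hwirqs must
 * keep the fixed Linux irq numbers 16..31 could register a legacy domain.
 * "my_domain_ops" is a hypothetical ops structure providing map()/unmap().
 *
 *	domain = irq_domain_add_legacy(NULL, 16, 16, 0, &my_domain_ops, NULL);
 *	if (!domain)
 *		return -ENOMEM;
 *
 * As noted above, map() runs for each hwirq before the call returns.
 */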

/**
 * irq_find_matching_fwnode() - Locates a domain for a given fwnode
 * @fwnode: FW descriptor of the interrupt controller
 * @bus_token: bus token of the domain to select; DOMAIN_BUS_ANY matches any
 */
struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
					    enum irq_domain_bus_token bus_token)
{
	struct irq_domain *h, *found = NULL;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * though...
	 *
	 * bus_token == DOMAIN_BUS_ANY matches any domain, any other
	 * values must generate an exact match for the domain to be
	 * selected.
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->match)
			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
		else
			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
			      ((bus_token == DOMAIN_BUS_ANY) ||
			       (h->bus_token == bus_token)));

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwnode);
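
/*
 * Usage sketch (editor's addition): look up the domain registered for a
 * firmware node before creating mappings through it. "fwnode" is assumed
 * to have been obtained from the interrupt controller's description.
 *
 *	struct irq_domain *d;
 *
 *	d = irq_find_matching_fwnode(fwnode, DOMAIN_BUS_ANY);
 *	if (!d)
 *		return -EPROBE_DEFER;
 */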

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;

	/* Clear reverse map for this hwirq */
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
	}
}

int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					domain->name, hwirq, virq, ret);
			}
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = virq;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
	mutex_unlock(&irq_domain_mutex);

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);

void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int i;

	of_node = irq_domain_get_of_node(domain);
	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
	}
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	struct device_node *of_node;
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	of_node = irq_domain_get_of_node(domain);
	virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
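
/*
 * Usage sketch (editor's addition): a controller that can generate any
 * hwirq it is told to can ask for a direct mapping and program the
 * returned number into its hardware. "domain" and the register write are
 * hypothetical.
 *
 *	virq = irq_create_direct_mapping(domain);
 *	if (!virq)
 *		return -ENOSPC;
 *	writel(virq, msg_reg);		// hwirq == virq by construction
 */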

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	struct device_node *of_node;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	of_node = irq_domain_get_of_node(domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node));
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
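
/*
 * Usage sketch (editor's addition): map hardware line 5 of a controller and
 * request it like any other Linux irq. "domain", "my_handler" and "dev" are
 * hypothetical.
 *
 *	unsigned int virq = irq_create_mapping(domain, 5);
 *
 *	if (!virq)
 *		return -EINVAL;
 *	ret = request_irq(virq, my_handler, 0, "mydev", dev);
 */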

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert into the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int ret;

	of_node = irq_domain_get_of_node(domain);
	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(of_node));
	if (unlikely(ret < 0))
		return ret;

	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);

static int irq_domain_translate(struct irq_domain *d,
				struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	if (d->ops->translate)
		return d->ops->translate(d, fwspec, hwirq, type);
#endif
	if (d->ops->xlate)
		return d->ops->xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);

	/* If domain has no translation, then we assume interrupt line */
	*hwirq = fwspec->param[0];
	return 0;
}

static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
				      struct irq_fwspec *fwspec)
{
	int i;

	fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
	fwspec->param_count = irq_data->args_count;

	for (i = 0; i < irq_data->args_count; i++)
		fwspec->param[i] = irq_data->args[i];
}

unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
	struct irq_domain *domain;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	if (fwspec->fwnode)
		domain = irq_find_matching_fwnode(fwspec->fwnode, DOMAIN_BUS_ANY);
	else
		domain = irq_default_domain;

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
		return 0;

	if (irq_domain_is_hierarchy(domain)) {
		/*
		 * If we've already configured this interrupt,
		 * don't do it again, or hell will break loose.
		 */
		virq = irq_find_mapping(domain, hwirq);
		if (virq)
			return virq;

		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
		if (virq <= 0)
			return 0;
	} else {
		/* Create mapping */
		virq = irq_create_mapping(domain, hwirq);
		if (!virq)
			return virq;
	}

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != irq_get_trigger_type(virq))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);

unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
	struct irq_fwspec fwspec;

	of_phandle_args_to_fwspec(irq_data, &fwspec);
	return irq_create_fwspec_mapping(&fwspec);
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	irq_domain_disassociate(domain, virq);
	irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_domain_get_irq_data(domain, hwirq);
		if (data && data->hwirq == hwirq)
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	struct irq_domain *domain;
	struct radix_tree_iter iter;
	void *data, **slot;
	int i;

	seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(domain, &irq_domain_list, link) {
		struct device_node *of_node;
		int count = 0;
		of_node = irq_domain_get_of_node(domain);
		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
			count++;
		seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
			   domain == irq_default_domain ? '*' : ' ', domain->name,
			   domain->revmap_size + count, domain->revmap_size,
			   domain->revmap_direct_max_irq,
			   of_node ? of_node_full_name(of_node) : "");
	}
	mutex_unlock(&irq_domain_mutex);

	seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		      "active", "type", "domain");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);
		domain = desc->irq_data.domain;

		if (domain) {
			struct irq_chip *chip;
			int hwirq = desc->irq_data.hwirq;
			bool direct;

			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05x ", hwirq);

			chip = irq_desc_get_chip(desc);
			seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, data ? "0x%p " : " %p ", data);

			seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
			direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
			seq_printf(m, "%6s%-8s ",
				   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
				   direct ? "(DIRECT)" : "");
			seq_printf(m, "%s\n", desc->irq_data.domain->name);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			const u32 *intspec, unsigned int intsize,
			irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings. For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
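
/*
 * Usage sketch (editor's addition): a controller whose DT binding uses two
 * interrupt cells (hwirq number + trigger flags) can simply plug the helper
 * above into its ops. "my_irq_map" is a hypothetical map() callback.
 *
 *	static const struct irq_domain_ops my_domain_ops = {
 *		.map	= my_irq_map,
 *		.xlate	= irq_domain_xlate_twocell,
 *	};
 */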

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

static int irq_domain_alloc_descs(int virq, unsigned int cnt,
				  irq_hw_number_t hwirq, int node)
{
	unsigned int hint;

	if (virq >= 0) {
		virq = irq_alloc_descs(virq, virq, cnt, node);
	} else {
		hint = hwirq % nr_irqs;
		if (hint == 0)
			hint++;
		virq = irq_alloc_descs_from(hint, cnt, node);
		if (virq <= 0 && hint > 1)
			virq = irq_alloc_descs_from(1, cnt, node);
	}

	return virq;
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_domain_create_hierarchy - Add an irq domain into the hierarchy
 * @parent: Parent irq domain to associate with the new domain
 * @flags: Irq domain flags associated to the domain
 * @size: Size of the domain. See below
 * @fwnode: Optional fwnode of the interrupt controller
 * @ops: Pointer to the interrupt domain callbacks
 * @host_data: Controller private data pointer
 *
 * If @size is 0 a tree domain is created, otherwise a linear domain.
 *
 * If successful the parent is associated to the new domain and the
 * domain flags are set.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
					       unsigned int flags,
					       unsigned int size,
					       struct fwnode_handle *fwnode,
					       const struct irq_domain_ops *ops,
					       void *host_data)
{
	struct irq_domain *domain;

	if (size)
		domain = irq_domain_create_linear(fwnode, size, ops, host_data);
	else
		domain = irq_domain_create_tree(fwnode, ops, host_data);
	if (domain) {
		domain->parent = parent;
		domain->flags |= flags;
	}

	return domain;
}
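
/*
 * Usage sketch (editor's addition): a stacked irqchip (e.g. an MSI-style
 * block sitting on top of its parent controller) registers itself as a
 * child domain so allocation requests flow down the hierarchy. "parent",
 * "fwnode", "my_hier_ops" and "priv" are hypothetical; my_hier_ops is
 * assumed to provide .alloc()/.free() as required for hierarchy domains.
 *
 *	domain = irq_domain_create_hierarchy(parent, 0, 0, fwnode,
 *					     &my_hier_ops, priv);
 *	if (!domain)
 *		return -ENOMEM;
 */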

static void irq_domain_insert_irq(int virq)
{
	struct irq_data *data;

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = virq;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_insert(&domain->revmap_tree, hwirq, data);
			mutex_unlock(&revmap_trees_mutex);
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && data->chip)
			domain->name = data->chip->name;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);
}

static void irq_domain_remove_irq(int virq)
{
	struct irq_data *data;

	irq_set_status_flags(virq, IRQ_NOREQUEST);
	irq_set_chip_and_handler(virq, NULL, NULL);
	synchronize_irq(virq);
	smp_mb();

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = 0;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_delete(&domain->revmap_tree, hwirq);
			mutex_unlock(&revmap_trees_mutex);
		}
	}
}

static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
						   struct irq_data *child)
{
	struct irq_data *irq_data;

	irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
				irq_data_get_node(child));
	if (irq_data) {
		child->parent_data = irq_data;
		irq_data->irq = child->irq;
		irq_data->common = child->common;
		irq_data->domain = domain;
	}

	return irq_data;
}

static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data, *tmp;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		tmp = irq_data->parent_data;
		irq_data->parent_data = NULL;
		irq_data->domain = NULL;

		while (tmp) {
			irq_data = tmp;
			tmp = tmp->parent_data;
			kfree(irq_data);
		}
	}
}

static int irq_domain_alloc_irq_data(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct irq_domain *parent;
	int i;

	/* The outermost irq_data is embedded in struct irq_desc */
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		irq_data->domain = domain;

		for (parent = domain->parent; parent; parent = parent->parent) {
			irq_data = irq_domain_insert_irq_data(parent, irq_data);
			if (!irq_data) {
				irq_domain_free_irq_data(virq, i + 1);
				return -ENOMEM;
			}
		}
	}

	return 0;
}

/**
 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
 * @domain: domain to match
 * @virq: IRQ number to get irq_data
 */
struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
					 unsigned int virq)
{
	struct irq_data *irq_data;

	for (irq_data = irq_get_irq_data(virq); irq_data;
	     irq_data = irq_data->parent_data)
		if (irq_data->domain == domain)
			return irq_data;

	return NULL;
}

/**
 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hwirq number
 * @chip: The associated interrupt chip
 * @chip_data: The associated chip data
 */
int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
				  irq_hw_number_t hwirq, struct irq_chip *chip,
				  void *chip_data)
{
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);

	if (!irq_data)
		return -ENOENT;

	irq_data->hwirq = hwirq;
	irq_data->chip = chip ? chip : &no_irq_chip;
	irq_data->chip_data = chip_data;

	return 0;
}

/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
	__irq_set_handler(virq, handler, 0, handler_name);
	irq_set_handler_data(virq, handler_data);
}

/**
 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
 * @irq_data: The pointer to irq_data
 */
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
	irq_data->hwirq = 0;
	irq_data->chip = &no_irq_chip;
	irq_data->chip_data = NULL;
}

/**
 * irq_domain_free_irqs_common - Clear irq_data and free the parent
 * @domain: Interrupt domain to match
 * @virq: IRQ number to start with
 * @nr_irqs: The number of irqs to free
 */
void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data)
			irq_domain_reset_irq_data(irq_data);
	}
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

/**
 * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
 * @domain: Interrupt domain to match
 * @virq: IRQ number to start with
 * @nr_irqs: The number of irqs to free
 */
void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_set_handler_data(virq + i, NULL);
		irq_set_handler(virq + i, NULL);
	}
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static bool irq_domain_is_auto_recursive(struct irq_domain *domain)
{
	return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE;
}

static void irq_domain_free_irqs_recursive(struct irq_domain *domain,
					   unsigned int irq_base,
					   unsigned int nr_irqs)
{
	domain->ops->free(domain, irq_base, nr_irqs);
	if (irq_domain_is_auto_recursive(domain)) {
		BUG_ON(!domain->parent);
		irq_domain_free_irqs_recursive(domain->parent, irq_base,
					       nr_irqs);
	}
}

int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
				    unsigned int irq_base,
				    unsigned int nr_irqs, void *arg)
{
	int ret = 0;
	struct irq_domain *parent = domain->parent;
	bool recursive = irq_domain_is_auto_recursive(domain);

	BUG_ON(recursive && !parent);
	if (recursive)
		ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
						      nr_irqs, arg);
	if (ret >= 0)
		ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
	if (ret < 0 && recursive)
		irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);

	return ret;
}

/**
 * __irq_domain_alloc_irqs - Allocate IRQs from domain
 * @domain: domain to allocate from
 * @irq_base: allocate specified IRQ number if irq_base >= 0
 * @nr_irqs: number of IRQs to allocate
 * @node: NUMA node id for memory allocation
 * @arg: domain specific argument
 * @realloc: IRQ descriptors have already been allocated if true
 *
 * Allocate IRQ numbers and initialize all data structures to support
 * hierarchy IRQ domains.
 * Parameter @realloc is mainly to support legacy IRQs.
 * Returns error code or allocated IRQ number
 *
 * The whole process to setup an IRQ has been split into two steps.
 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
 * descriptors and required hardware resources. The second step,
 * irq_domain_activate_irq(), is to program the hardware with the
 * preallocated resources. In this way, it's easier to roll back when
 * failing to allocate resources.
 */
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
			    unsigned int nr_irqs, int node, void *arg,
			    bool realloc)
{
	int i, ret, virq;

	if (domain == NULL) {
		domain = irq_default_domain;
		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
			return -EINVAL;
	}

	if (!domain->ops->alloc) {
		pr_debug("domain->ops->alloc() is NULL\n");
		return -ENOSYS;
	}

	if (realloc && irq_base >= 0) {
		virq = irq_base;
	} else {
		virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node);
		if (virq < 0) {
			pr_debug("cannot allocate IRQ(base %d, count %d)\n",
				 irq_base, nr_irqs);
			return virq;
		}
	}

	if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
		pr_debug("cannot allocate memory for IRQ%d\n", virq);
		ret = -ENOMEM;
		goto out_free_desc;
	}

	mutex_lock(&irq_domain_mutex);
	ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg);
	if (ret < 0) {
		mutex_unlock(&irq_domain_mutex);
		goto out_free_irq_data;
	}
	for (i = 0; i < nr_irqs; i++)
		irq_domain_insert_irq(virq + i);
	mutex_unlock(&irq_domain_mutex);

	return virq;

out_free_irq_data:
	irq_domain_free_irq_data(virq, nr_irqs);
out_free_desc:
	irq_free_descs(virq, nr_irqs);
	return ret;
}
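
/*
 * Usage sketch (editor's addition): callers normally use the
 * irq_domain_alloc_irqs() wrapper from <linux/irqdomain.h> rather than this
 * function directly. "domain" and "fwspec" are hypothetical.
 *
 *	virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
 *	if (virq <= 0)
 *		return -ENOSPC;
 *	...
 *	irq_domain_free_irqs(virq, 1);
 */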

/**
 * irq_domain_free_irqs - Free IRQ number and associated data structures
 * @virq: base IRQ number
 * @nr_irqs: number of IRQs to free
 */
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_get_irq_data(virq);
	int i;

	if (WARN(!data || !data->domain || !data->domain->ops->free,
		 "NULL pointer, cannot free irq\n"))
		return;

	mutex_lock(&irq_domain_mutex);
	for (i = 0; i < nr_irqs; i++)
		irq_domain_remove_irq(virq + i);
	irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs);
	mutex_unlock(&irq_domain_mutex);

	irq_domain_free_irq_data(virq, nr_irqs);
	irq_free_descs(virq, nr_irqs);
}

/**
 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
 * @irq_base: Base IRQ number
 * @nr_irqs: Number of IRQs to allocate
 * @arg: Allocation data (arch/domain specific)
 *
 * Check whether the domain has been set up as auto-recursive. If not,
 * allocate through the parent domain.
 */
int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
				 unsigned int irq_base, unsigned int nr_irqs,
				 void *arg)
{
	/* irq_domain_alloc_irqs_recursive() has called parent's alloc() */
	if (irq_domain_is_auto_recursive(domain))
		return 0;

	domain = domain->parent;
	if (domain)
		return irq_domain_alloc_irqs_recursive(domain, irq_base,
						       nr_irqs, arg);
	return -ENOSYS;
}

/**
 * irq_domain_free_irqs_parent - Free interrupts from parent domain
 * @irq_base: Base IRQ number
 * @nr_irqs: Number of IRQs to free
 *
 * Check whether the domain has been set up as auto-recursive. If not,
 * free through the parent domain.
 */
void irq_domain_free_irqs_parent(struct irq_domain *domain,
				 unsigned int irq_base, unsigned int nr_irqs)
{
	/* irq_domain_free_irqs_recursive() will call parent's free */
	if (!irq_domain_is_auto_recursive(domain) && domain->parent)
		irq_domain_free_irqs_recursive(domain->parent, irq_base,
					       nr_irqs);
}

/**
 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
 *			     interrupt
 * @irq_data: outermost irq_data associated with interrupt
 *
 * This is the second step to call domain_ops->activate to program interrupt
 * controllers, so the interrupt could actually get delivered.
 */
void irq_domain_activate_irq(struct irq_data *irq_data)
{
	if (irq_data && irq_data->domain) {
		struct irq_domain *domain = irq_data->domain;

		if (irq_data->parent_data)
			irq_domain_activate_irq(irq_data->parent_data);
		if (domain->ops->activate)
			domain->ops->activate(domain, irq_data);
	}
}

/**
 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
 *			       deactivate interrupt
 * @irq_data: outermost irq_data associated with interrupt
 *
 * It calls domain_ops->deactivate to program interrupt controllers to disable
 * interrupt delivery.
 */
void irq_domain_deactivate_irq(struct irq_data *irq_data)
{
	if (irq_data && irq_data->domain) {
		struct irq_domain *domain = irq_data->domain;

		if (domain->ops->deactivate)
			domain->ops->deactivate(domain, irq_data);
		if (irq_data->parent_data)
			irq_domain_deactivate_irq(irq_data->parent_data);
	}
}

static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
	/* Hierarchy irq_domains must implement callback alloc() */
	if (domain->ops->alloc)
		domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
/**
 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
 * @domain: domain to match
 * @virq: IRQ number to get irq_data
 */
struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
					 unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
}

/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
	irq_set_chip_data(virq, chip_data);
	irq_set_handler_data(virq, handler_data);
}

static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */