// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <[email protected]>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signalled Interrupt for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>

#include "internals.h"

/**
 * alloc_msi_entry - Allocate and initialize an msi_desc
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array of size @nvec
 *
 * If @affinity is not NULL then an affinity array[@nvec] is allocated
 * and the affinity masks from @affinity are copied.
 */
struct msi_desc *
alloc_msi_entry(struct device *dev, int nvec, const struct cpumask *affinity)
{
	struct msi_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity,
			nvec * sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}

	return desc;
}

void free_msi_entry(struct msi_desc *entry)
{
	kfree(entry->affinity);
	kfree(entry);
}

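/*
 * Minimal usage sketch for the two helpers above, assuming a hypothetical
 * caller that owns a valid struct device pointer "dev":
 *
 *	struct msi_desc *desc;
 *
 *	desc = alloc_msi_entry(dev, 1, NULL);
 *	if (!desc)
 *		return -ENOMEM;
 *	list_add_tail(&desc->list, dev_to_msi_list(dev));
 *	...
 *	list_del(&desc->list);
 *	free_msi_entry(desc);
 */
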
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

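/*
 * Sketch of reading back the cached message, assuming "irq" is a Linux
 * interrupt number that has an MSI descriptor attached (hypothetical
 * caller context):
 *
 *	struct msi_msg msg;
 *
 *	get_cached_msi_msg(irq, &msg);
 *	pr_debug("MSI: addr %08x/%08x data %08x\n",
 *		 msg.address_hi, msg.address_lo, msg.data);
 */
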
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
		irq_chip_write_msi_msg(irq_data, &msg);
	}

	return ret;
}

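/*
 * Sketch: an irq chip for a hierarchical MSI domain can plug the helper
 * above in directly as its affinity callback (the chip name and message
 * writer below are hypothetical):
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "MY-MSI",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_write_msi_msg	= my_write_msi_msg,
 *		.irq_set_affinity	= msi_domain_set_affinity,
 *	};
 */
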
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg;

	BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
	irq_chip_write_msi_msg(irq_data, &msg);
	return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg;

	memset(&msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, &msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				/*
				 * Unwind all successfully initialized
				 * entries, including virq + 0.
				 */
				for (i--; i >= 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};

#ifdef GENERIC_MSI_DOMAIN_OPS
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
#else
#define msi_domain_ops_get_hwirq	NULL
#define msi_domain_ops_prepare		NULL
#define msi_domain_ops_set_desc		NULL
#endif /* !GENERIC_MSI_DOMAIN_OPS */

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq	= msi_domain_ops_get_hwirq,
	.msi_init	= msi_domain_ops_init,
	.msi_check	= msi_domain_ops_check,
	.msi_prepare	= msi_domain_ops_prepare,
	.set_desc	= msi_domain_ops_set_desc,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain && !domain->name && info->chip)
		domain->name = info->chip->name;

	return domain;
}

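/*
 * Typical creation sketch (the fwnode, parent domain and chip below are
 * hypothetical). With the two MSI_FLAG_USE_DEF_* flags set, the update
 * helpers above fill in any callbacks left NULL:
 *
 *	static struct msi_domain_info my_msi_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	msi_domain = msi_create_irq_domain(fwnode, &my_msi_info, parent);
 *	if (!msi_domain)
 *		return -ENOMEM;
 */
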
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, arg);

	return ret;
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret = 0;

	for_each_msi_entry(desc, dev) {
		/* Don't even try the multi-MSI brain damage. */
		if (WARN_ON(!desc->irq || desc->nvec_used != 1)) {
			ret = -EINVAL;
			break;
		}

		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		ops->set_desc(arg, desc);
		/* Assumes the domain mutex is held! */
		ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
						      arg);
		if (ret)
			break;

		irq_set_msi_desc_off(desc->irq, 0, desc);
	}

	if (ret) {
		/* Mop up the damage */
		for_each_msi_entry(desc, dev) {
			if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
				continue;

			irq_domain_free_irqs_common(domain, desc->irq, 1);
		}
	}

	return ret;
}

/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	if (domain->bus_token != DOMAIN_BUS_PCI_MSI)
		return false;

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the maskbit is set.
	 */
	desc = first_msi_entry(dev);
	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
}

/**
 * msi_domain_alloc_irqs - Allocate interrupts from an MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Returns 0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct irq_data *irq_data;
	struct msi_desc *desc;
	msi_alloc_info_t arg;
	int i, ret, virq;
	bool can_reserve;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0) {
			ret = -ENOSPC;
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
		}
	}

	if (ops->msi_finish)
		ops->msi_finish(&arg, 0);

	can_reserve = msi_check_reservation_mode(domain, info, dev);

	for_each_msi_entry(desc, dev) {
		virq = desc->irq;
		if (desc->nvec_used == 1)
			dev_dbg(dev, "irq %d for MSI\n", virq);
		else
			dev_dbg(dev, "irq [%d-%d] for MSI\n",
				virq, virq + desc->nvec_used - 1);
		/*
		 * This flag is set by the PCI layer as we need to activate
		 * the MSI entries before the PCI layer enables MSI in the
		 * card. Otherwise the card latches a random msi message.
		 */
		if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
			continue;

		irq_data = irq_domain_get_irq_data(domain, desc->irq);
		if (!can_reserve)
			irqd_clr_can_reserve(irq_data);
		ret = irq_domain_activate_irq(irq_data, can_reserve);
		if (ret)
			goto cleanup;
	}

	/*
	 * If these interrupts use reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (can_reserve) {
		for_each_msi_entry(desc, dev) {
			irq_data = irq_domain_get_irq_data(domain, desc->irq);
			irqd_clr_activated(irq_data);
		}
	}
	return 0;

cleanup:
	for_each_msi_entry(desc, dev) {
		struct irq_data *irqd;

		if (desc->irq == virq)
			break;

		irqd = irq_domain_get_irq_data(domain, desc->irq);
		if (irqd_is_activated(irqd))
			irq_domain_deactivate_irq(irqd);
	}
	msi_domain_free_irqs(domain, dev);
	return ret;
}

/**
 * msi_domain_free_irqs - Free interrupts from an MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_desc *desc;

	for_each_msi_entry(desc, dev) {
		/*
		 * We might have failed to allocate an MSI early
		 * enough that there is no IRQ associated to this
		 * entry. If that's the case, don't do anything.
		 */
		if (desc->irq) {
			irq_domain_free_irqs(desc->irq, desc->nvec_used);
			desc->irq = 0;
		}
	}
}

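/*
 * Allocation and teardown pair up as in this sketch (device, domain and
 * vector count hypothetical); the MSI descriptors for the device must
 * have been set up beforehand, e.g. via alloc_msi_entry():
 *
 *	ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
 *	if (ret)
 *		return ret;
 *	...
 *	msi_domain_free_irqs(msi_domain, dev);
 */
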
/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Returns the pointer to the msi_domain_info stored in
 * @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

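/*
 * Sketch: a stacked MSI implementation can recover the info it passed at
 * creation time (the domain pointer below is hypothetical):
 *
 *	struct msi_domain_info *info = msi_get_domain_info(msi_domain);
 *
 *	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
 *		...
 */
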
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */