// SPDX-License-Identifier: GPL-2.0-only
/*
 * Freescale SCFG MSI(-X) support
 *
 * Copyright (C) 2016 Freescale Semiconductor.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/dma-iommu.h>

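/*
 * The SCFG MSI block provides a single MSIIR doorbell register; a message
 * written to it sets a bit in one of the MSIR status registers, each of
 * which is wired to its own GIC interrupt (see ls_scfg_msi_irq_handler()).
 */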
#define MSI_IRQS_PER_MSIR	32
#define MSI_MSIR_OFFSET		4

#define MSI_LS1043V1_1_IRQS_PER_MSIR	8
#define MSI_LS1043V1_1_MSIR_OFFSET	0x10

struct ls_scfg_msi_cfg {
	u32 ibs_shift; /* Shift of interrupt bit select */
	u32 msir_irqs; /* The irq number per MSIR */
	u32 msir_base; /* The base address of MSIR */
};

struct ls_scfg_msir {
	struct ls_scfg_msi *msi_data;
	unsigned int index;
	unsigned int gic_irq;
	unsigned int bit_start;
	unsigned int bit_end;
	unsigned int srs; /* Shared interrupt register select */
	void __iomem *reg;
};

struct ls_scfg_msi {
	spinlock_t lock;
	struct platform_device *pdev;
	struct irq_domain *parent;
	struct irq_domain *msi_domain;
	void __iomem *regs;
	phys_addr_t msiir_addr;
	struct ls_scfg_msi_cfg *cfg;
	u32 msir_num;
	struct ls_scfg_msir *msir;
	u32 irqs_num;
	unsigned long *used;
};

static struct irq_chip ls_scfg_msi_irq_chip = {
	.name = "MSI",
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info ls_scfg_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
		  MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &ls_scfg_msi_irq_chip,
};

static int msi_affinity_flag = 1;

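/* "lsmsi=no-affinity" on the kernel command line disables per-CPU MSI steering. */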
static int __init early_parse_ls_scfg_msi(char *p)
{
	if (p && strncmp(p, "no-affinity", 11) == 0)
		msi_affinity_flag = 0;
	else
		msi_affinity_flag = 1;

	return 0;
}
early_param("lsmsi", early_parse_ls_scfg_msi);

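/*
 * Compose the MSI message: the address targets the MSIIR doorbell and the
 * data word carries the hwirq, with the low bits steering the write to the
 * MSIR of the interrupt's effective CPU when affinity handling is enabled.
 */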
static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);

	msg->address_hi = upper_32_bits(msi_data->msiir_addr);
	msg->address_lo = lower_32_bits(msi_data->msiir_addr);
	msg->data = data->hwirq;

	if (msi_affinity_flag) {
		const struct cpumask *mask;

		mask = irq_data_get_effective_affinity_mask(data);
		msg->data |= cpumask_first(mask);
	}

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}

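/*
 * Each MSIR is serviced by a single CPU, so changing affinity amounts to
 * selecting the MSIR that belongs to the requested CPU.
 */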
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
	u32 cpu;

	if (!msi_affinity_flag)
		return -EINVAL;

	if (!force)
		cpu = cpumask_any_and(mask, cpu_online_mask);
	else
		cpu = cpumask_first(mask);

	if (cpu >= msi_data->msir_num)
		return -EINVAL;

	if (msi_data->msir[cpu].gic_irq <= 0) {
		pr_warn("cannot bind the irq to cpu%d\n", cpu);
		return -EINVAL;
	}

	irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static struct irq_chip ls_scfg_msi_parent_chip = {
	.name = "SCFG",
	.irq_compose_msi_msg = ls_scfg_msi_compose_msg,
	.irq_set_affinity = ls_scfg_msi_set_affinity,
};

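/* Allocate a free hwirq from the bitmap and bind it to the parent chip. */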
static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
					unsigned int virq,
					unsigned int nr_irqs,
					void *args)
{
	msi_alloc_info_t *info = args;
	struct ls_scfg_msi *msi_data = domain->host_data;
	int pos, err = 0;

	WARN_ON(nr_irqs != 1);

	spin_lock(&msi_data->lock);
	pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
	if (pos < msi_data->irqs_num)
		__set_bit(pos, msi_data->used);
	else
		err = -ENOSPC;
	spin_unlock(&msi_data->lock);

	if (err)
		return err;

	err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr);
	if (err)
		return err;

	irq_domain_set_info(domain, virq, pos,
			    &ls_scfg_msi_parent_chip, msi_data,
			    handle_simple_irq, NULL, NULL);

	return 0;
}

static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
					unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
	int pos;

	pos = d->hwirq;
	if (pos < 0 || pos >= msi_data->irqs_num) {
		pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
		return;
	}

	spin_lock(&msi_data->lock);
	__clear_bit(pos, msi_data->used);
	spin_unlock(&msi_data->lock);
}

static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
	.alloc = ls_scfg_msi_domain_irq_alloc,
	.free = ls_scfg_msi_domain_irq_free,
};

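/*
 * Chained handler for one MSIR: read the register and translate every set
 * bit into a hwirq, then dispatch the mapped virq.
 */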
static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
	struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
	struct ls_scfg_msi *msi_data = msir->msi_data;
	unsigned long val;
	int pos, size, virq, hwirq;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	val = ioread32be(msir->reg);

	pos = msir->bit_start;
	size = msir->bit_end + 1;

	for_each_set_bit_from(pos, &val, size) {
		hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
			msir->srs;
		virq = irq_find_mapping(msi_data->parent, hwirq);
		if (virq)
			generic_handle_irq(virq);
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}

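/*
 * Two-level domain setup: a linear domain owns the raw hwirqs and the
 * PCI/MSI domain is stacked on top of it.
 */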
static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
	/* Initialize MSI domain parent */
	msi_data->parent = irq_domain_add_linear(NULL,
						 msi_data->irqs_num,
						 &ls_scfg_msi_domain_ops,
						 msi_data);
	if (!msi_data->parent) {
		dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi_data->msi_domain = pci_msi_create_irq_domain(
				of_node_to_fwnode(msi_data->pdev->dev.of_node),
				&ls_scfg_msi_domain_info,
				msi_data->parent);
	if (!msi_data->msi_domain) {
		dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
		irq_domain_remove(msi_data->parent);
		return -ENOMEM;
	}

	return 0;
}

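/*
 * Hook one MSIR up to its GIC interrupt. On LS1043A v1.1 each MSIR exposes
 * only 8 valid bits whose position depends on the MSIR index, hence the
 * bit_start/bit_end window computed below.
 */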
static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
{
	struct ls_scfg_msir *msir;
	int virq, i, hwirq;

	virq = platform_get_irq(msi_data->pdev, index);
	if (virq <= 0)
		return -ENODEV;

	msir = &msi_data->msir[index];
	msir->index = index;
	msir->msi_data = msi_data;
	msir->gic_irq = virq;
	msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;

	if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
		msir->bit_start = 32 - ((msir->index + 1) *
				  MSI_LS1043V1_1_IRQS_PER_MSIR);
		msir->bit_end = msir->bit_start +
				MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
	} else {
		msir->bit_end = msi_data->cfg->msir_irqs - 1;
	}

	irq_set_chained_handler_and_data(msir->gic_irq,
					 ls_scfg_msi_irq_handler,
					 msir);

	if (msi_affinity_flag) {
		/* Associate MSIR interrupt to the cpu */
		irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
		msir->srs = 0; /* This value is determined by the hardware */
	}

	/* Release the hwirqs corresponding to this MSIR */
	if (!msi_affinity_flag || msir->index == 0) {
		for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
			hwirq = i << msi_data->cfg->ibs_shift | msir->index;
			bitmap_clear(msi_data->used, hwirq, 1);
		}
	}

	return 0;
}

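/* Undo ls_scfg_msi_setup_hwirq(): unhook the chained handler and re-reserve the hwirqs. */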
static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
{
	struct ls_scfg_msi *msi_data = msir->msi_data;
	int i, hwirq;

	if (msir->gic_irq > 0)
		irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);

	for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
		hwirq = i << msi_data->cfg->ibs_shift | msir->index;
		bitmap_set(msi_data->used, hwirq, 1);
	}

	return 0;
}

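/*
 * ibs_shift is the number of low hwirq bits used for the MSIR select (SRS);
 * the bits above it carry the interrupt bit select (IBS).
 */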
static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
	.ibs_shift = 3,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
	.msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
};

static const struct of_device_id ls_scfg_msi_id[] = {
	/* The following two misspelled compatibles are obsolete */
	{ .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
	{ .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},

	{ .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
	{ .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
	{},
};
MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);

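/*
 * Probe: map the SCFG MSI registers, size the hwirq bitmap, wire up each
 * MSIR's GIC interrupt and finally create the IRQ domains.
 */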
static int ls_scfg_msi_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct ls_scfg_msi *msi_data;
	struct resource *res;
	int i, ret;

	match = of_match_device(ls_scfg_msi_id, &pdev->dev);
	if (!match)
		return -ENODEV;

	msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
	if (!msi_data)
		return -ENOMEM;

	msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(msi_data->regs)) {
		dev_err(&pdev->dev, "failed to initialize 'regs'\n");
		return PTR_ERR(msi_data->regs);
	}
	msi_data->msiir_addr = res->start;

	msi_data->pdev = pdev;
	spin_lock_init(&msi_data->lock);

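	/*
	 * hwirq layout is IBS << ibs_shift | SRS, so the hwirq space spans
	 * MSI_IRQS_PER_MSIR << ibs_shift entries.
	 */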
	msi_data->irqs_num = MSI_IRQS_PER_MSIR *
			     (1 << msi_data->cfg->ibs_shift);
	msi_data->used = devm_kcalloc(&pdev->dev,
				      BITS_TO_LONGS(msi_data->irqs_num),
				      sizeof(*msi_data->used),
				      GFP_KERNEL);
	if (!msi_data->used)
		return -ENOMEM;
	/*
	 * Reserve all the hwirqs.
	 * The available hwirqs will be released in ls_scfg_msi_setup_hwirq().
	 */
	bitmap_set(msi_data->used, 0, msi_data->irqs_num);

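	/*
	 * The number of MSIRs comes from the DT. With affinity steering each
	 * MSIR is dedicated to one CPU, so clamp it to the number of possible
	 * CPUs; if there are fewer MSIRs than CPUs, disable steering instead.
	 */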
	msi_data->msir_num = of_irq_count(pdev->dev.of_node);

	if (msi_affinity_flag) {
		u32 cpu_num;

		cpu_num = num_possible_cpus();
		if (msi_data->msir_num >= cpu_num)
			msi_data->msir_num = cpu_num;
		else
			msi_affinity_flag = 0;
	}

	msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
				      sizeof(*msi_data->msir),
				      GFP_KERNEL);
	if (!msi_data->msir)
		return -ENOMEM;

	for (i = 0; i < msi_data->msir_num; i++)
		ls_scfg_msi_setup_hwirq(msi_data, i);

	ret = ls_scfg_msi_domains_init(msi_data);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, msi_data);

	return 0;
}

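/* Tear down in reverse order of probe: chained handlers first, then the IRQ domains. */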
static int ls_scfg_msi_remove(struct platform_device *pdev)
{
	struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < msi_data->msir_num; i++)
		ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);

	irq_domain_remove(msi_data->msi_domain);
	irq_domain_remove(msi_data->parent);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver ls_scfg_msi_driver = {
	.driver = {
		.name = "ls-scfg-msi",
		.of_match_table = ls_scfg_msi_id,
	},
	.probe = ls_scfg_msi_probe,
	.remove = ls_scfg_msi_remove,
};

module_platform_driver(ls_scfg_msi_driver);

MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
MODULE_LICENSE("GPL v2");