// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007, Michael Ellerman, IBM Corporation.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include <asm/dcr.h>
#include <asm/machdep.h>

#include "cell.h"
/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6

/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10
68 struct irq_domain *irq_domain;
#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
/* No-op stub when debugfs support is compiled out. */
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif
86 static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
88 pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
90 dcr_write(msic->dcr_host, dcr_n, val);
93 static void axon_msi_cascade(struct irq_desc *desc)
95 struct irq_chip *chip = irq_desc_get_chip(desc);
96 struct axon_msic *msic = irq_desc_get_handler_data(desc);
97 u32 write_offset, msi;
101 write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
102 pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);
104 /* write_offset doesn't wrap properly, so we have to mask it */
105 write_offset &= MSIC_FIFO_SIZE_MASK;
107 while (msic->read_offset != write_offset && retry < 100) {
108 idx = msic->read_offset / sizeof(__le32);
109 msi = le32_to_cpu(msic->fifo_virt[idx]);
112 pr_devel("axon_msi: woff %x roff %x msi %x\n",
113 write_offset, msic->read_offset, msi);
115 if (msi < irq_get_nr_irqs() && irq_get_chip_data(msi) == msic) {
116 generic_handle_irq(msi);
117 msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
120 * Reading the MSIC_WRITE_OFFSET_REG does not
121 * reliably flush the outstanding DMA to the
122 * FIFO buffer. Here we were reading stale
123 * data, so we need to retry.
127 pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
132 pr_devel("axon_msi: late irq 0x%x, retry %d\n",
137 msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
138 msic->read_offset &= MSIC_FIFO_SIZE_MASK;
142 printk(KERN_WARNING "axon_msi: irq timed out\n");
144 msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
145 msic->read_offset &= MSIC_FIFO_SIZE_MASK;
148 chip->irq_eoi(&desc->irq_data);
151 static struct axon_msic *find_msi_translator(struct pci_dev *dev)
153 struct irq_domain *irq_domain;
154 struct device_node *dn, *tmp;
156 struct axon_msic *msic = NULL;
158 dn = of_node_get(pci_device_to_OF_node(dev));
160 dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
164 for (; dn; dn = of_get_next_parent(dn)) {
165 ph = of_get_property(dn, "msi-translator", NULL);
172 "axon_msi: no msi-translator property found\n");
177 dn = of_find_node_by_phandle(*ph);
181 "axon_msi: msi-translator doesn't point to a node\n");
185 irq_domain = irq_find_host(dn);
187 dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n",
192 msic = irq_domain->host_data;
200 static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
202 struct device_node *dn;
206 dn = of_node_get(pci_device_to_OF_node(dev));
208 dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
212 for (; dn; dn = of_get_next_parent(dn)) {
213 if (!dev->no_64bit_msi) {
214 prop = of_get_property(dn, "msi-address-64", &len);
219 prop = of_get_property(dn, "msi-address-32", &len);
226 "axon_msi: no msi-address-(32|64) properties found\n");
233 msg->address_hi = prop[0];
234 msg->address_lo = prop[1];
238 msg->address_lo = prop[0];
242 "axon_msi: malformed msi-address-(32|64) property\n");
252 static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
254 unsigned int virq, rc;
255 struct msi_desc *entry;
257 struct axon_msic *msic;
259 msic = find_msi_translator(dev);
263 rc = setup_msi_msg_address(dev, &msg);
267 msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
268 virq = irq_create_direct_mapping(msic->irq_domain);
271 "axon_msi: virq allocation failed!\n");
274 dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);
276 irq_set_msi_desc(virq, entry);
278 pci_write_msi_msg(virq, &msg);
284 static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
286 struct msi_desc *entry;
288 dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
290 msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
291 irq_set_msi_desc(entry->irq, NULL);
292 irq_dispose_mapping(entry->irq);
297 static struct irq_chip msic_irq_chip = {
298 .irq_mask = pci_msi_mask_irq,
299 .irq_unmask = pci_msi_unmask_irq,
300 .irq_shutdown = pci_msi_mask_irq,
304 static int msic_host_map(struct irq_domain *h, unsigned int virq,
307 irq_set_chip_data(virq, h->host_data);
308 irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
313 static const struct irq_domain_ops msic_host_ops = {
314 .map = msic_host_map,
317 static void axon_msi_shutdown(struct platform_device *device)
319 struct axon_msic *msic = dev_get_drvdata(&device->dev);
322 pr_devel("axon_msi: disabling %pOF\n",
323 irq_domain_get_of_node(msic->irq_domain));
324 tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
325 tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
326 msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
329 static int axon_msi_probe(struct platform_device *device)
331 struct device_node *dn = device->dev.of_node;
332 struct axon_msic *msic;
334 int dcr_base, dcr_len;
336 pr_devel("axon_msi: setting up dn %pOF\n", dn);
338 msic = kzalloc(sizeof(*msic), GFP_KERNEL);
340 printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n",
345 dcr_base = dcr_resource_start(dn, 0);
346 dcr_len = dcr_resource_len(dn, 0);
348 if (dcr_base == 0 || dcr_len == 0) {
350 "axon_msi: couldn't parse dcr properties on %pOF\n",
355 msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
356 if (!DCR_MAP_OK(msic->dcr_host)) {
357 printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n",
362 msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
363 &msic->fifo_phys, GFP_KERNEL);
364 if (!msic->fifo_virt) {
365 printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n",
370 virq = irq_of_parse_and_map(dn, 0);
372 printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n",
376 memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
378 /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
379 msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
380 if (!msic->irq_domain) {
381 printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n",
386 irq_set_handler_data(virq, msic);
387 irq_set_chained_handler(virq, axon_msi_cascade);
388 pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
390 /* Enable the MSIC hardware */
391 msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
392 msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
393 msic->fifo_phys & 0xFFFFFFFF);
394 msic_dcr_write(msic, MSIC_CTRL_REG,
395 MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
396 MSIC_CTRL_FIFO_SIZE);
398 msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
399 & MSIC_FIFO_SIZE_MASK;
401 dev_set_drvdata(&device->dev, msic);
403 cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs;
404 cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
406 axon_msi_debug_setup(dn, msic);
408 printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn);
413 dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
422 static const struct of_device_id axon_msi_device_id[] = {
424 .compatible = "ibm,axon-msic"
429 static struct platform_driver axon_msi_driver = {
430 .probe = axon_msi_probe,
431 .shutdown = axon_msi_shutdown,
434 .of_match_table = axon_msi_device_id,
438 static int __init axon_msi_init(void)
440 return platform_driver_register(&axon_msi_driver);
442 subsys_initcall(axon_msi_init);
446 static int msic_set(void *data, u64 val)
448 struct axon_msic *msic = data;
449 out_le32(msic->trigger, val);
453 static int msic_get(void *data, u64 *val)
459 DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");
461 void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
466 if (of_address_to_resource(dn, 0, &res)) {
467 pr_devel("axon_msi: couldn't get reg property\n");
471 msic->trigger = ioremap(res.start, 0x4);
472 if (!msic->trigger) {
473 pr_devel("axon_msi: ioremap failed\n");
477 snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));
479 debugfs_create_file(name, 0600, arch_debugfs_dir, msic, &fops_msic);