// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;

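/*
 * Read/write the root port's own configuration space. These default to
 * DBI register accesses and may be overridden by the SoC glue driver
 * through the rd_own_conf/wr_own_conf callbacks in pp->ops.
 */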
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
                               u32 *val)
{
        struct dw_pcie *pci;

        if (pp->ops->rd_own_conf)
                return pp->ops->rd_own_conf(pp, where, size, val);

        pci = to_dw_pcie_from_pp(pp);
        return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
                               u32 val)
{
        struct dw_pcie *pci;

        if (pp->ops->wr_own_conf)
                return pp->ops->wr_own_conf(pp, where, size, val);

        pci = to_dw_pcie_from_pp(pp);
        return dw_pcie_write(pci->dbi_base + where, size, val);
}

static void dw_msi_ack_irq(struct irq_data *d)
{
        irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
        pci_msi_mask_irq(d);
        irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
        pci_msi_unmask_irq(d);
        irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
        .name = "PCI-MSI",
        .irq_ack = dw_msi_ack_irq,
        .irq_mask = dw_msi_mask_irq,
        .irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
        .chip   = &dw_pcie_msi_irq_chip,
};

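/*
 * MSI interrupt handler: each MSI controller services 32 vectors
 * (MAX_MSI_IRQS_PER_CTRL) and its STATUS/ENABLE registers live in a
 * 12-byte block, hence the "i * 12" register offsets below.
 */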
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
        int i, pos, irq;
        u32 val, num_ctrls;
        irqreturn_t ret = IRQ_NONE;

        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

        for (i = 0; i < num_ctrls; i++) {
                dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
                                    &val);
                if (!val)
                        continue;

                ret = IRQ_HANDLED;
                pos = 0;
                while ((pos = find_next_bit((unsigned long *) &val, 32,
                                            pos)) != 32) {
                        irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
                        generic_handle_irq(irq);
                        dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
                                            4, 1 << pos);
                        pos++;
                }
        }

        return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct pcie_port *pp;

        chained_irq_enter(chip, desc);

        pp = irq_desc_get_handler_data(desc);
        dw_handle_msi_irq(pp);

        chained_irq_exit(chip, desc);
}

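/*
 * Compose the MSI message for one vector: the target address is either
 * the DMA-mapped page set up in dw_pcie_msi_init() or whatever the SoC
 * glue returns from get_msi_addr(); the data defaults to the hwirq number.
 */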
static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        u64 msi_target;

        if (pp->ops->get_msi_addr)
                msi_target = pp->ops->get_msi_addr(pp);
        else
                msi_target = (u64)pp->msi_data;

        msg->address_lo = lower_32_bits(msi_target);
        msg->address_hi = upper_32_bits(msi_target);

        if (pp->ops->get_msi_data)
                msg->data = pp->ops->get_msi_data(pp, data->hwirq);
        else
                msg->data = data->hwirq;

        dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
                (int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
                                   const struct cpumask *mask, bool force)
{
        return -EINVAL;
}

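/*
 * Mask/unmask a vector by updating the cached per-controller enable word
 * and writing it back to MSI_INTR0_ENABLE; pp->lock serializes these
 * read-modify-write sequences.
 */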
static void dw_pci_bottom_mask(struct irq_data *data)
{
        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
        unsigned int res, bit, ctrl;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        if (pp->ops->msi_clear_irq) {
                pp->ops->msi_clear_irq(pp, data->hwirq);
        } else {
                ctrl = data->hwirq / 32;
                res = ctrl * 12;
                bit = data->hwirq % 32;

                pp->irq_status[ctrl] &= ~(1 << bit);
                dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
                                    pp->irq_status[ctrl]);
        }

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *data)
{
        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
        unsigned int res, bit, ctrl;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        if (pp->ops->msi_set_irq) {
                pp->ops->msi_set_irq(pp, data->hwirq);
        } else {
                ctrl = data->hwirq / 32;
                res = ctrl * 12;
                bit = data->hwirq % 32;

                pp->irq_status[ctrl] |= 1 << bit;
                dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
                                    pp->irq_status[ctrl]);
        }

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
        struct msi_desc *msi = irq_data_get_msi_desc(d);
        struct pcie_port *pp;

        pp = msi_desc_to_pci_sysdata(msi);

        if (pp->ops->msi_irq_ack)
                pp->ops->msi_irq_ack(d->hwirq, pp);
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
        .name = "DWPCI-MSI",
        .irq_ack = dw_pci_bottom_ack,
        .irq_compose_msi_msg = dw_pci_setup_msi_msg,
        .irq_set_affinity = dw_pci_msi_set_affinity,
        .irq_mask = dw_pci_bottom_mask,
        .irq_unmask = dw_pci_bottom_unmask,
};

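/*
 * Multi-MSI needs a naturally aligned power-of-two block of vectors,
 * which is exactly what bitmap_find_free_region() hands out for
 * order_base_2(nr_irqs).
 */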
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
                                    unsigned int virq, unsigned int nr_irqs,
                                    void *args)
{
        struct pcie_port *pp = domain->host_data;
        unsigned long flags;
        u32 i;
        int bit;

        raw_spin_lock_irqsave(&pp->lock, flags);

        bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
                                      order_base_2(nr_irqs));

        raw_spin_unlock_irqrestore(&pp->lock, flags);

        if (bit < 0)
                return -ENOSPC;

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_info(domain, virq + i, bit + i,
                                    &dw_pci_msi_bottom_irq_chip,
                                    pp, handle_edge_irq,
                                    NULL, NULL);

        return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
                                    unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);
        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);
        bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
                              order_base_2(nr_irqs));
        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
        .alloc  = dw_pcie_irq_domain_alloc,
        .free   = dw_pcie_irq_domain_free,
};

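/*
 * Create the two-level hierarchy: a linear domain that maps hwirqs onto
 * the vector bitmap above, with a PCI/MSI domain stacked on top of it.
 */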
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

        pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
                                                  &dw_pcie_msi_domain_ops, pp);
        if (!pp->irq_domain) {
                dev_err(pci->dev, "failed to create IRQ domain\n");
                return -ENOMEM;
        }

        pp->msi_domain = pci_msi_create_irq_domain(fwnode,
                                                   &dw_pcie_msi_domain_info,
                                                   pp->irq_domain);
        if (!pp->msi_domain) {
                dev_err(pci->dev, "failed to create MSI domain\n");
                irq_domain_remove(pp->irq_domain);
                return -ENOMEM;
        }

        return 0;
}

void dw_pcie_free_msi(struct pcie_port *pp)
{
        irq_set_chained_handler(pp->msi_irq, NULL);
        irq_set_handler_data(pp->msi_irq, NULL);

        irq_domain_remove(pp->msi_domain);
        irq_domain_remove(pp->irq_domain);
}

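/*
 * The MSI target is an ordinary DMA-mapped page: an endpoint's MSI write
 * to its bus address terminates at the root complex and raises the MSI
 * controller interrupt, so the page contents themselves are never read.
 */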
void dw_pcie_msi_init(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct page *page;
        u64 msi_target;

        page = alloc_page(GFP_KERNEL);
        pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, pp->msi_data)) {
                dev_err(dev, "failed to map MSI data\n");
                __free_page(page);
                return;
        }
        msi_target = (u64)pp->msi_data;

        /* Program the MSI target address into the controller */
        dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
                            lower_32_bits(msi_target));
        dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
                            upper_32_bits(msi_target));
}

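/*
 * A minimal, purely illustrative device tree fragment for the properties
 * consumed below: a "config" reg entry provides the window that is split
 * into CFG0/CFG1 halves, and the optional "num-viewport" property reports
 * how many iATU viewports the instance implements (default 2). The node
 * name and addresses here are hypothetical.
 *
 *	pcie@40000000 {
 *		reg = <0x40000000 0x1000>, <0x40100000 0x100000>;
 *		reg-names = "dbi", "config";
 *		num-viewport = <4>;
 *	};
 */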
int dw_pcie_host_init(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct device_node *np = dev->of_node;
        struct platform_device *pdev = to_platform_device(dev);
        struct resource_entry *win, *tmp;
        struct pci_bus *bus, *child;
        struct pci_host_bridge *bridge;
        struct resource *cfg_res;
        int ret;

        raw_spin_lock_init(&pci->pp.lock);

        cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
        if (cfg_res) {
                pp->cfg0_size = resource_size(cfg_res) / 2;
                pp->cfg1_size = resource_size(cfg_res) / 2;
                pp->cfg0_base = cfg_res->start;
                pp->cfg1_base = cfg_res->start + pp->cfg0_size;
        } else if (!pp->va_cfg0_base) {
                dev_err(dev, "missing *config* reg space\n");
        }

        bridge = pci_alloc_host_bridge(0);
        if (!bridge)
                return -ENOMEM;

        ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
                                               &bridge->windows, &pp->io_base);
        if (ret)
                goto error;

        ret = devm_request_pci_bus_resources(dev, &bridge->windows);
        if (ret)
                goto error;

        /* Get the I/O and memory ranges from DT */
        resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
                switch (resource_type(win->res)) {
                case IORESOURCE_IO:
                        ret = pci_remap_iospace(win->res, pp->io_base);
                        if (ret) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         ret, win->res);
                                resource_list_destroy_entry(win);
                        } else {
                                pp->io = win->res;
                                pp->io->name = "I/O";
                                pp->io_size = resource_size(pp->io);
                                pp->io_bus_addr = pp->io->start - win->offset;
                        }
                        break;
                case IORESOURCE_MEM:
                        pp->mem = win->res;
                        pp->mem->name = "MEM";
                        pp->mem_size = resource_size(pp->mem);
                        pp->mem_bus_addr = pp->mem->start - win->offset;
                        break;
                case 0:
                        pp->cfg = win->res;
                        pp->cfg0_size = resource_size(pp->cfg) / 2;
                        pp->cfg1_size = resource_size(pp->cfg) / 2;
                        pp->cfg0_base = pp->cfg->start;
                        pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
                        break;
                case IORESOURCE_BUS:
                        pp->busn = win->res;
                        break;
                }
        }

        if (!pci->dbi_base) {
                pci->dbi_base = devm_pci_remap_cfgspace(dev,
                                                        pp->cfg->start,
                                                        resource_size(pp->cfg));
                if (!pci->dbi_base) {
                        dev_err(dev, "error with ioremap\n");
                        ret = -ENOMEM;
                        goto error;
                }
        }

        pp->mem_base = pp->mem->start;

        if (!pp->va_cfg0_base) {
                pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
                                                           pp->cfg0_base,
                                                           pp->cfg0_size);
                if (!pp->va_cfg0_base) {
                        dev_err(dev, "error with ioremap in function\n");
                        ret = -ENOMEM;
                        goto error;
                }
        }

        if (!pp->va_cfg1_base) {
                pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
                                                           pp->cfg1_base,
                                                           pp->cfg1_size);
                if (!pp->va_cfg1_base) {
                        dev_err(dev, "error with ioremap\n");
                        ret = -ENOMEM;
                        goto error;
                }
        }

        ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
        if (ret)
                pci->num_viewport = 2;

        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                /*
                 * If a specific SoC driver needs to change the
                 * default number of vectors, it needs to implement
                 * the set_num_vectors callback.
                 */
                if (!pp->ops->set_num_vectors) {
                        pp->num_vectors = MSI_DEF_NUM_VECTORS;
                } else {
                        pp->ops->set_num_vectors(pp);

                        if (pp->num_vectors > MAX_MSI_IRQS ||
                            pp->num_vectors == 0) {
                                dev_err(dev,
                                        "Invalid number of vectors\n");
                                ret = -EINVAL;
                                goto error;
                        }
                }

                if (!pp->ops->msi_host_init) {
                        ret = dw_pcie_allocate_domains(pp);
                        if (ret)
                                goto error;

                        if (pp->msi_irq)
                                irq_set_chained_handler_and_data(pp->msi_irq,
                                                        dw_chained_msi_isr,
                                                        pp);
                } else {
                        ret = pp->ops->msi_host_init(pp);
                        if (ret < 0)
                                goto error;
                }
        }

        if (pp->ops->host_init) {
                ret = pp->ops->host_init(pp);
                if (ret)
                        goto error;
        }

        pp->root_bus_nr = pp->busn->start;

        bridge->dev.parent = dev;
        bridge->sysdata = pp;
        bridge->busnr = pp->root_bus_nr;
        bridge->ops = &dw_pcie_ops;
        bridge->map_irq = of_irq_parse_and_map_pci;
        bridge->swizzle_irq = pci_common_swizzle;

        ret = pci_scan_root_bus_bridge(bridge);
        if (ret)
                goto error;

        bus = bridge->bus;

        if (pp->ops->scan_bus)
                pp->ops->scan_bus(pp);

        pci_bus_size_bridges(bus);
        pci_bus_assign_resources(bus);

        list_for_each_entry(child, &bus->children, node)
                pcie_bus_configure_settings(child);

        pci_bus_add_devices(bus);
        return 0;

error:
        pci_free_host_bridge(bridge);
        return ret;
}

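/*
 * Config accesses to downstream devices borrow iATU viewport 1 to map a
 * CFG0 (link neighbour) or CFG1 (deeper bus) window over the target BDF.
 * Parts with only two viewports share that viewport with the I/O window,
 * so it is reprogrammed back to I/O after the access.
 */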
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
                                 u32 devfn, int where, int size, u32 *val)
{
        int ret, type;
        u32 busdev, cfg_size;
        u64 cpu_addr;
        void __iomem *va_cfg_base;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        if (pp->ops->rd_other_conf)
                return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

        busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
                 PCIE_ATU_FUNC(PCI_FUNC(devfn));

        if (bus->parent->number == pp->root_bus_nr) {
                type = PCIE_ATU_TYPE_CFG0;
                cpu_addr = pp->cfg0_base;
                cfg_size = pp->cfg0_size;
                va_cfg_base = pp->va_cfg0_base;
        } else {
                type = PCIE_ATU_TYPE_CFG1;
                cpu_addr = pp->cfg1_base;
                cfg_size = pp->cfg1_size;
                va_cfg_base = pp->va_cfg1_base;
        }

        dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                  type, cpu_addr,
                                  busdev, cfg_size);
        ret = dw_pcie_read(va_cfg_base + where, size, val);
        if (pci->num_viewport <= 2)
                dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                          PCIE_ATU_TYPE_IO, pp->io_base,
                                          pp->io_bus_addr, pp->io_size);

        return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
                                 u32 devfn, int where, int size, u32 val)
{
        int ret, type;
        u32 busdev, cfg_size;
        u64 cpu_addr;
        void __iomem *va_cfg_base;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        if (pp->ops->wr_other_conf)
                return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

        busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
                 PCIE_ATU_FUNC(PCI_FUNC(devfn));

        if (bus->parent->number == pp->root_bus_nr) {
                type = PCIE_ATU_TYPE_CFG0;
                cpu_addr = pp->cfg0_base;
                cfg_size = pp->cfg0_size;
                va_cfg_base = pp->va_cfg0_base;
        } else {
                type = PCIE_ATU_TYPE_CFG1;
                cpu_addr = pp->cfg1_base;
                cfg_size = pp->cfg1_size;
                va_cfg_base = pp->va_cfg1_base;
        }

        dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                  type, cpu_addr,
                                  busdev, cfg_size);
        ret = dw_pcie_write(va_cfg_base + where, size, val);
        if (pci->num_viewport <= 2)
                dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                          PCIE_ATU_TYPE_IO, pp->io_base,
                                          pp->io_bus_addr, pp->io_size);

        return ret;
}

static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
                                int dev)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        /* If there is no link, then there is no device */
        if (bus->number != pp->root_bus_nr) {
                if (!dw_pcie_link_up(pci))
                        return 0;
        }

        /* Access only one slot on each root port */
        if (bus->number == pp->root_bus_nr && dev > 0)
                return 0;

        return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
                           int size, u32 *val)
{
        struct pcie_port *pp = bus->sysdata;

        if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
                *val = 0xffffffff;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }

        if (bus->number == pp->root_bus_nr)
                return dw_pcie_rd_own_conf(pp, where, size, val);

        return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                           int where, int size, u32 val)
{
        struct pcie_port *pp = bus->sysdata;

        if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (bus->number == pp->root_bus_nr)
                return dw_pcie_wr_own_conf(pp, where, size, val);

        return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
        .read = dw_pcie_rd_conf,
        .write = dw_pcie_wr_conf,
};

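/*
 * When the iATU is in "unroll" mode the viewport register is absent and
 * reads back as all ones, which is what this probe relies on.
 */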
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
        u32 val;

        val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
        if (val == 0xffffffff)
                return 1;

        return 0;
}

void dw_pcie_setup_rc(struct pcie_port *pp)
{
        u32 val, ctrl, num_ctrls;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        dw_pcie_setup(pci);

        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

        /* Initialize IRQ Status array */
        for (ctrl = 0; ctrl < num_ctrls; ctrl++)
                dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * 12), 4,
                                    &pp->irq_status[ctrl]);

        /* Setup RC BARs */
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

        /* Setup interrupt pins */
        dw_pcie_dbi_ro_wr_en(pci);
        val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
        val &= 0xffff00ff;
        val |= 0x00000100;
        dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
        dw_pcie_dbi_ro_wr_dis(pci);

        /* Setup bus numbers */
        val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
        val &= 0xff000000;
        val |= 0x00ff0100;
        dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

        /* Setup command register */
        val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
        val &= 0xffff0000;
        val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
                PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
        dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

        /*
         * If the platform provides ->rd_other_conf, it means the platform
         * uses its own address translation component rather than ATU, so
         * we should not program the ATU here.
         */
        if (!pp->ops->rd_other_conf) {
                /* Get iATU unroll support */
                pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
                dev_dbg(pci->dev, "iATU unroll: %s\n",
                        pci->iatu_unroll_enabled ? "enabled" : "disabled");

                dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
                                          PCIE_ATU_TYPE_MEM, pp->mem_base,
                                          pp->mem_bus_addr, pp->mem_size);
                if (pci->num_viewport > 2)
                        dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
                                                  PCIE_ATU_TYPE_IO, pp->io_base,
                                                  pp->io_bus_addr, pp->io_size);
        }

        dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

        /* Enable write permission for the DBI read-only register */
        dw_pcie_dbi_ro_wr_en(pci);
        /* Program correct class for RC */
        dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
        /* Better disable write permission right after the update */
        dw_pcie_dbi_ro_wr_dis(pci);

        /* Trigger a directed speed change so the link trains to the best rate */
        dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
        val |= PORT_LOGIC_SPEED_CHANGE;
        dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}