// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}
static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};
static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};
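/* MSI interrupt handler: dispatch every pending vector to its mapped IRQ */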
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
					(i * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &status);
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}
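/* Compose the MSI message: the doorbell DMA address, with the hwirq as data */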
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}
static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
			    pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
			    pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
}
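/* Bottom-level chip: operates directly on the DWC MSI controller registers */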
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
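/*
 * Allocate a power-of-two aligned block of hardware vectors from the
 * controller-wide bitmap, one hwirq per requested MSI vector.
 */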
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
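/*
 * Create the two-level IRQ domain hierarchy: a linear domain for the
 * hardware vectors with a PCI/MSI domain stacked on top of it.
 */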
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}
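/* Tear down the chained handler, both IRQ domains and the doorbell page */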
void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);
		irq_set_handler_data(pp->msi_irq, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);

	if (pp->msi_page)
		__free_page(pp->msi_page);
}
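/*
 * Set up the MSI doorbell: the DMA address of a spare page is programmed
 * as the MSI target address, so endpoint writes to it raise the
 * controller's MSI interrupt rather than hitting real memory.
 */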
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	u64 msi_target;

	pp->msi_page = alloc_page(GFP_KERNEL);
	pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "Failed to map MSI data\n");
		__free_page(pp->msi_page);
		pp->msi_page = NULL;
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* Program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    lower_32_bits(msi_target));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
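/*
 * dw_pcie_host_init - set up and enumerate a DesignWare PCIe root complex.
 *
 * Typically called from a SoC glue driver's probe routine after it has
 * filled in struct pcie_port: maps the "config" region, parses the DT
 * ranges, sets up MSI, runs the platform's ->host_init() callback and
 * finally scans the bus.
 */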
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_bus *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	u32 hdr_type;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) >> 1;
		pp->cfg1_size = resource_size(cfg_res) >> 1;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
					      &bridge->dma_ranges, NULL);
	if (ret)
		return ret;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io = win->res;
			pp->io->name = "I/O";
			pp->io_size = resource_size(pp->io);
			pp->io_bus_addr = pp->io->start - win->offset;
			pp->io_base = pci_pio_to_address(pp->io->start);
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) >> 1;
			pp->cfg1_size = resource_size(pp->cfg) >> 1;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		}
	}

	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "Error with ioremap\n");
			return -ENOMEM;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
					pp->cfg1_base, pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "Error with ioremap\n");
			return -ENOMEM;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				return -EINVAL;
			}
		}

		if (!pp->ops->msi_host_init) {
			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							dw_chained_msi_isr,
							pp);
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		}
	}

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto err_free_msi;
	}

	ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type);
	if (ret != PCIBIOS_SUCCESSFUL) {
		dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n",
			ret);
		ret = pcibios_err_to_errno(ret);
		goto err_free_msi;
	}
	if (hdr_type != PCI_HEADER_TYPE_BRIDGE) {
		dev_err(pci->dev,
			"PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n",
			hdr_type);
		ret = -EIO;
		goto err_free_msi;
	}

	bridge->sysdata = pp;
	bridge->ops = &dw_pcie_ops;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto err_free_msi;

	pp->root_bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(pp->root_bus);
	pci_bus_assign_resources(pp->root_bus);

	list_for_each_entry(child, &pp->root_bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(pp->root_bus);
	return 0;

err_free_msi:
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->root_bus);
	pci_remove_root_bus(pp->root_bus);
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
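/*
 * Accesses to devices below the root bus go through an iATU window that
 * is retargeted for each config transaction: CFG0 for the directly
 * attached bus, CFG1 for anything further downstream.
 */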
static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				     u32 devfn, int where, int size, u32 *val,
				     bool write)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent)) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	if (write)
		ret = dw_pcie_write(va_cfg_base + where, size, *val);
	else
		ret = dw_pcie_read(va_cfg_base + where, size, val);

	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
					 false);
}
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
					 true);
}
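/* Filter out accesses to absent devices: no link means no downstream device */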
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (!pci_is_root_bus(bus)) {
		if (!dw_pcie_link_up(pci))
			return 0;
	} else if (dev > 0)
		/* Access only one slot on each root port */
		return 0;

	return 1;
}
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (pci_is_root_bus(bus))
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pci_is_root_bus(bus))
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
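/*
 * Program the RC's own config space, MSI controllers and address
 * translation; typically invoked by the glue driver once the core is out
 * of reset (and again on resume, since DBI state is lost).
 */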
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (!pp->ops->msi_host_init) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			pp->irq_mask[ctrl] = ~0;
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    4, pp->irq_mask[ctrl]);
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    4, ~0);
		}
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);

	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);