// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Texas Instruments Keystone SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
 *		https://www.ti.com
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 * Implementation based on pci-exynos.c and pcie-designware.c
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE_VENDORID_MASK	0xffff
#define PCIE_DEVICEID_SHIFT	16

/* Application registers */
#define CMD_STATUS			0x004
#define LTSSM_EN_VAL			BIT(0)
#define OB_XLAT_EN_VAL			BIT(1)
#define DBI_CS2				BIT(5)

#define CFG_SETUP			0x008
#define CFG_BUS(x)			(((x) & 0xff) << 16)
#define CFG_DEVICE(x)			(((x) & 0x1f) << 8)
#define CFG_FUNC(x)			((x) & 0x7)
#define CFG_TYPE1			BIT(24)

#define OB_SIZE				0x030
#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))
#define OB_ENABLEN			BIT(0)
#define OB_WIN_SIZE			8	/* 8MB */

#define PCIE_LEGACY_IRQ_ENABLE_SET(n)	(0x188 + (0x10 * ((n) - 1)))
#define PCIE_LEGACY_IRQ_ENABLE_CLR(n)	(0x18c + (0x10 * ((n) - 1)))
#define PCIE_EP_IRQ_SET			0x64
#define PCIE_EP_IRQ_CLR			0x68
#define INT_ENABLE			BIT(0)

/* IRQ register defines */
#define IRQ_EOI				0x050

#define MSI_IRQ				0x054
#define MSI_IRQ_STATUS(n)		(0x104 + ((n) << 4))
#define MSI_IRQ_ENABLE_SET(n)		(0x108 + ((n) << 4))
#define MSI_IRQ_ENABLE_CLR(n)		(0x10c + ((n) << 4))
#define MSI_IRQ_OFFSET			4

#define IRQ_STATUS(n)			(0x184 + ((n) << 4))
#define IRQ_ENABLE_SET(n)		(0x188 + ((n) << 4))
#define INTx_EN				BIT(0)

#define ERR_IRQ_STATUS			0x1c4
#define ERR_IRQ_ENABLE_SET		0x1c8
#define ERR_AER				BIT(5)	/* ECRC error */
#define AM6_ERR_AER			BIT(4)	/* AM6 ECRC error */
#define ERR_AXI				BIT(4)	/* AXI tag lookup fatal error */
#define ERR_CORR			BIT(3)	/* Correctable error */
#define ERR_NONFATAL			BIT(2)	/* Non-fatal error */
#define ERR_FATAL			BIT(1)	/* Fatal error */
#define ERR_SYS				BIT(0)	/* System error */
#define ERR_IRQ_ALL			(ERR_AER | ERR_AXI | ERR_CORR | \
					 ERR_NONFATAL | ERR_FATAL | ERR_SYS)

/* PCIE controller device IDs */
#define PCIE_RC_K2HK			0xb008
#define PCIE_RC_K2E			0xb009
#define PCIE_RC_K2L			0xb00a
#define PCIE_RC_K2G			0xb00b

#define KS_PCIE_DEV_TYPE_MASK		(0x3 << 1)
#define KS_PCIE_DEV_TYPE(mode)		((mode) << 1)

#define EP				0x0
#define LEG_EP				0x1
#define RC				0x2

#define KS_PCIE_SYSCLOCKOUTEN		BIT(0)

#define AM654_PCIE_DEV_TYPE_MASK	0x3
#define AM654_WIN_SIZE			SZ_64K

#define APP_ADDR_SPACE_0		(16 * SZ_1K)

#define to_keystone_pcie(x)		dev_get_drvdata((x)->dev)

struct ks_pcie_of_data {
	enum dw_pcie_device_mode mode;
	const struct dw_pcie_host_ops *host_ops;
	const struct dw_pcie_ep_ops *ep_ops;
	u32 version;
};

struct keystone_pcie {
	struct dw_pcie		*pci;
	/* PCI Device ID */
	u32			device_id;
	int			legacy_host_irqs[PCI_NUM_INTX];
	struct device_node	*legacy_intc_np;

	int			msi_host_irq;
	int			num_lanes;
	u32			num_viewport;
	struct phy		**phy;
	struct device_link	**link;
	struct device_node	*msi_intc_np;
	struct irq_domain	*legacy_irq_domain;
	struct device_node	*np;

	/* Application register space */
	void __iomem		*va_app_base;	/* DT 1st resource */
	struct resource		app;
	bool			is_am6;
};

static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
	return readl(ks_pcie->va_app_base + offset);
}

static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
			       u32 val)
{
	writel(val, ks_pcie->va_app_base + offset);
}

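/*
 * MSI dispatch model (derived from the register math below): hwirq % 8
 * selects one of eight 4-bit MSI banks (the MSI_IRQ_* register index) and
 * hwirq / 8 selects the bit inside that bank, so bank N serves vectors N,
 * N + 8, N + 16 and N + 24. The ack/mask/unmask helpers and
 * ks_pcie_msi_irq_handler() all rely on this layout.
 */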
static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	u32 reg_offset, bit_pos;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
			   BIT(bit_pos));
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}

static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	struct dw_pcie *pci;
	u64 msi_target;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	msi_target = ks_pcie->app.start + MSI_IRQ;
	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);
	msg->data = data->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void ks_pcie_msi_mask(struct irq_data *data)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	unsigned long flags;
	u32 reg_offset, bit_pos;

	raw_spin_lock_irqsave(&pp->lock, flags);

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
			   BIT(bit_pos));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void ks_pcie_msi_unmask(struct irq_data *data)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	unsigned long flags;
	u32 reg_offset, bit_pos;

	raw_spin_lock_irqsave(&pp->lock, flags);

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
			   BIT(bit_pos));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static struct irq_chip ks_pcie_msi_irq_chip = {
	.name = "KEYSTONE-PCI-MSI",
	.irq_ack = ks_pcie_msi_irq_ack,
	.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
	.irq_set_affinity = ks_pcie_msi_set_affinity,
	.irq_mask = ks_pcie_msi_mask,
	.irq_unmask = ks_pcie_msi_unmask,
};

static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
	return dw_pcie_allocate_domains(pp);
}

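/*
 * Each INTx line has its own IRQ_STATUS register; bit 0 set means the
 * interrupt is pending, in which case it is forwarded into the legacy IRQ
 * domain and then acknowledged at the controller with an IRQ_EOI write.
 */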
static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
				      int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 pending;

	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));

	if (BIT(0) & pending) {
		dev_dbg(dev, ": irq: irq_offset %d", offset);
		generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
	}

	/* EOI the INTx interrupt */
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}

static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}

static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	u32 reg;

	reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
	if (!reg)
		return IRQ_NONE;

	if (reg & ERR_SYS)
		dev_err(dev, "System Error\n");
	if (reg & ERR_FATAL)
		dev_err(dev, "Fatal Error\n");
	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");
	if (reg & ERR_CORR)
		dev_dbg(dev, "Correctable Error\n");
	if (!ks_pcie->is_am6 && (reg & ERR_AXI))
		dev_err(dev, "AXI tag lookup fatal Error\n");
	if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
		dev_err(dev, "ECRC Error\n");

	ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);

	return IRQ_HANDLED;
}

static void ks_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

static struct irq_chip ks_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_pcie_ack_legacy_irq,
	.irq_mask = ks_pcie_mask_legacy_irq,
	.irq_unmask = ks_pcie_unmask_legacy_irq,
};

static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
				       unsigned int irq,
				       irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}

static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
	.map = ks_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};

/**
 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *	     PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (!(val & DBI_CS2));
}

/**
 * ks_pcie_clear_dbi_mode() - Disable DBI mode
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *	     PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (val & DBI_CS2);
}

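/*
 * Outbound translation uses fixed 8 MB windows: OB_SIZE is programmed with
 * ilog2(OB_WIN_SIZE) and each OB_OFFSET_INDEX/OB_OFFSET_HI pair maps one
 * 8 MB slice of the bridge MEM window 1:1, up to num_viewport windows.
 */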
static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	u32 val;
	u32 num_viewport = ks_pcie->num_viewport;
	struct dw_pcie *pci = ks_pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	u64 start, end;
	struct resource *mem;
	int i;

	mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
	start = mem->start;
	end = mem->end;

	/* Disable BARs for inbound access */
	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
	ks_pcie_clear_dbi_mode(ks_pcie);

	if (ks_pcie->is_am6)
		return;

	val = ilog2(OB_WIN_SIZE);
	ks_pcie_app_writel(ks_pcie, OB_SIZE, val);

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; i < num_viewport && (start < end); i++) {
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
				   lower_32_bits(start) | OB_ENABLEN);
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
				   upper_32_bits(start));
		start += OB_WIN_SIZE * SZ_1M;
	}

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= OB_XLAT_EN_VAL;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}

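/*
 * Config accesses to downstream buses share a single remapped window:
 * CFG_SETUP latches the target bus/device/function (and selects Type 1
 * cycles for buses behind a bridge) before the generic accessor touches
 * va_cfg0_base.
 */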
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
					   unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 reg;

	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
		CFG_FUNC(PCI_FUNC(devfn));
	if (!pci_is_root_bus(bus->parent))
		reg |= CFG_TYPE1;
	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);

	return pp->va_cfg0_base + where;
}

static struct pci_ops ks_child_pcie_ops = {
	.map_bus = ks_pcie_other_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

/**
 * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
 * @bus: A pointer to the PCI bus structure.
 *
 * This sets BAR0 to enable inbound access for MSI_IRQ register
 */
static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	if (!pci_is_root_bus(bus))
		return 0;

	/* Configure and set up BAR0 */
	ks_pcie_set_dbi_mode(ks_pcie);

	/* Enable BAR0 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

	ks_pcie_clear_dbi_mode(ks_pcie);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient. Use physical address to avoid any conflicts.
	 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);

	return 0;
}

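/*
 * With BAR0 pointing at the application register space, inbound MSI writes
 * from downstream devices land directly on the MSI_IRQ register — the same
 * address that ks_pcie_compose_msi_msg() hands out as the MSI target.
 */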
static struct pci_ops ks_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
	.add_bus = ks_pcie_v3_65_add_bus,
};

/**
 * ks_pcie_link_up() - Check if link up
 * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
 *	 controller driver information.
 */
static int ks_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
	val &= PORT_LOGIC_LTSSM_STATE_MASK;
	return (val == PORT_LOGIC_LTSSM_STATE_L0);
}

static void ks_pcie_stop_link(struct dw_pcie *pci)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 val;

	/* Disable Link training */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~LTSSM_EN_VAL;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}

static int ks_pcie_start_link(struct dw_pcie *pci)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 val;

	/* Initiate Link Training */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);

	return 0;
}

static void ks_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct pci_dev *bridge;
	static const struct pci_device_id rc_pci_devids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
		{ 0, },
	};

	if (pci_is_root_bus(bus))
		bridge = dev;

	/* look for the host bridge */
	while (!pci_is_root_bus(bus)) {
		bridge = bus->self;
		bus = bus->parent;
	}

	if (!bridge)
		return;

	/*
	 * Keystone PCI controller has a h/w limitation of
	 * 256 bytes maximum read request size. It can't handle
	 * anything higher than this. So force this limit on
	 * all downstream devices.
	 */
	if (pci_match_id(rc_pci_devids, bridge)) {
		if (pcie_get_readrq(dev) > 256) {
			dev_info(&dev->dev, "limiting MRRS to 256\n");
			pcie_set_readrq(dev, 256);
		}
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);

static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = desc->irq_data.hwirq;
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	u32 offset = irq - ks_pcie->msi_host_irq;
	struct dw_pcie *pci = ks_pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 vector, reg, pos;

	dev_dbg(dev, "%s, irq %d\n", __func__, irq);

	/*
	 * The chained irq handler installation would have replaced normal
	 * interrupt driver handler so we need to take care of mask/unmask and
	 * ack operation.
	 */
	chained_irq_enter(chip, desc);

	reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
	/*
	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
	 * shows 1, 9, 17, 25 and so forth
	 */
	for (pos = 0; pos < 4; pos++) {
		if (!(reg & BIT(pos)))
			continue;

		vector = offset + (pos << 3);
		dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
		generic_handle_domain_irq(pp->irq_domain, vector);
	}

	chained_irq_exit(chip, desc);
}

/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @desc: Pointer to irq descriptor
 *
 * Traverse through pending legacy interrupts and invoke handler for each. Also
 * takes care of interrupt controller level mask/ack operation.
 */
static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, ": Handling legacy irq %d\n", irq);

	/*
	 * The chained irq handler installation would have replaced normal
	 * interrupt driver handler so we need to take care of mask/unmask and
	 * ack operation.
	 */
	chained_irq_enter(chip, desc);
	ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
	chained_irq_exit(chip, desc);
}

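/*
 * The two setup helpers below look up interrupt-controller sub-nodes of the
 * controller node by name. An illustrative sketch only, not the
 * authoritative binding (see Documentation/devicetree/bindings/pci/ti-pci.txt):
 *
 *	msi-interrupt-controller {
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupts = <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>;
 *	};
 */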
static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	struct irq_data *irq_data;
	int irq_count, irq, ret, i;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return 0;

	intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
	if (!intc_np) {
		if (ks_pcie->is_am6)
			return 0;
		dev_warn(dev, "msi-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}

		if (!ks_pcie->msi_host_irq) {
			irq_data = irq_get_irq_data(irq);
			if (!irq_data) {
				ret = -EINVAL;
				goto err;
			}
			ks_pcie->msi_host_irq = irq_data->hwirq;
		}

		irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
						 ks_pcie);
	}

	of_node_put(intc_np);
	return 0;

err:
	of_node_put(intc_np);
	return ret;
}

static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct irq_domain *legacy_irq_domain;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	int irq_count, irq, ret = 0, i;

	intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
	if (!intc_np) {
		/*
		 * Since legacy interrupts are modeled as edge-interrupts in
		 * AM6, keep it disabled for now.
		 */
		if (ks_pcie->is_am6)
			return 0;
		dev_warn(dev, "legacy-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}
		ks_pcie->legacy_host_irqs[i] = irq;

		irq_set_chained_handler_and_data(irq,
						 ks_pcie_legacy_irq_handler,
						 ks_pcie);
	}

	legacy_irq_domain =
		irq_domain_add_linear(intc_np, PCI_NUM_INTX,
				      &ks_pcie_legacy_irq_domain_ops, NULL);
	if (!legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		ret = -EINVAL;
		goto err;
	}
	ks_pcie->legacy_irq_domain = legacy_irq_domain;

	for (i = 0; i < PCI_NUM_INTX; i++)
		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);

err:
	of_node_put(intc_np);
	return ret;
}

#ifdef CONFIG_ARM
/*
 * When a PCI device does not exist during config cycles, keystone host
 * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
 * This handler always returns 0 for this kind of fault.
 */
static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
			 struct pt_regs *regs)
{
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;

		/* Fake an all-ones load result and skip the instruction */
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
	}

	return 0;
}
#endif

static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
	int ret;
	unsigned int id;
	struct regmap *devctrl_regs;
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int offset = 0;

	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
	if (IS_ERR(devctrl_regs))
		return PTR_ERR(devctrl_regs);

	/* Do not error out to maintain old DT compatibility */
	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args);
	if (!ret)
		offset = args.args[0];

	ret = regmap_read(devctrl_regs, offset, &id);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	int ret;

	pp->bridge->ops = &ks_pcie_ops;
	if (!ks_pcie->is_am6)
		pp->bridge->child_ops = &ks_child_pcie_ops;

	ret = ks_pcie_config_legacy_irq(ks_pcie);
	if (ret)
		return ret;

	ret = ks_pcie_config_msi_irq(ks_pcie);
	if (ret)
		return ret;

	ks_pcie_stop_link(pci);
	ks_pcie_setup_rc_app_regs(ks_pcie);
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
	       pci->dbi_base + PCI_IO_BASE);

	ret = ks_pcie_init_id(ks_pcie);
	if (ret < 0)
		return ret;

#ifdef CONFIG_ARM
	/*
	 * PCIe access errors that result into OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");
#endif

	return 0;
}

static const struct dw_pcie_host_ops ks_pcie_host_ops = {
	.host_init = ks_pcie_host_init,
	.msi_host_init = ks_pcie_msi_host_init,
};

static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
	.host_init = ks_pcie_host_init,
};

static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
	struct keystone_pcie *ks_pcie = priv;

	return ks_pcie_handle_error_irq(ks_pcie);
}

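/*
 * On AM654, dbi_base2 aliases dbi_base (see probe), so dbi2 writes must be
 * bracketed by DBI CS2 mode to reach the overlaid BAR mask registers
 * rather than the regular config space.
 */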
static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
				     u32 reg, size_t size, u32 val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_write(base + reg, size, val);
	ks_pcie_clear_dbi_mode(ks_pcie);
}

static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
	.start_link = ks_pcie_start_link,
	.stop_link = ks_pcie_stop_link,
	.link_up = ks_pcie_link_up,
	.write_dbi2 = ks_pcie_am654_write_dbi2,
};

static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	int flags;

	ep->page_size = AM654_WIN_SIZE;
	flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
	dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
}

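/*
 * Legacy IRQ in EP mode is raised as a pulse: enable the INTx line matching
 * PCI_INTERRUPT_PIN, assert EP_IRQ_SET, hold for about a millisecond, then
 * clear and disable the line again.
 */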
static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	u8 int_pin;

	int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
	if (int_pin == 0 || int_pin > 4)
		return;

	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
			   INT_ENABLE);
	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
	mdelay(1);
	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
			   INT_ENABLE);
}

static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   enum pci_epc_irq_type type,
				   u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		ks_pcie_am654_raise_legacy_irq(ks_pcie);
		break;
	case PCI_EPC_IRQ_MSI:
		dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
		break;
	case PCI_EPC_IRQ_MSIX:
		dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
		return -EINVAL;
	}

	return 0;
}

static const struct pci_epc_features ks_pcie_am654_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[2] = SZ_1M,
	.bar_fixed_size[3] = SZ_64K,
	.bar_fixed_size[4] = 256,
	.bar_fixed_size[5] = SZ_1M,
};

static const struct pci_epc_features*
ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
{
	return &ks_pcie_am654_epc_features;
}

static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
	.ep_init = ks_pcie_am654_ep_init,
	.raise_irq = ks_pcie_am654_raise_irq,
	.get_features = &ks_pcie_am654_get_features,
};

static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
	int num_lanes = ks_pcie->num_lanes;

	while (num_lanes--) {
		phy_power_off(ks_pcie->phy[num_lanes]);
		phy_exit(ks_pcie->phy[num_lanes]);
	}
}

static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
{
	int i;
	int ret;
	int num_lanes = ks_pcie->num_lanes;

	for (i = 0; i < num_lanes; i++) {
		ret = phy_reset(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(ks_pcie->phy[i]);
		if (ret < 0) {
			phy_exit(ks_pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(ks_pcie->phy[i]);
		phy_exit(ks_pcie->phy[i]);
	}

	return ret;
}

static int ks_pcie_set_mode(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int offset = 0;
	struct regmap *syscon;
	u32 val;
	u32 mask;
	int ret = 0;

	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
	if (IS_ERR(syscon))
		return 0;

	/* Do not error out to maintain old DT compatibility */
	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
	if (!ret)
		offset = args.args[0];

	mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
	val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;

	ret = regmap_update_bits(syscon, offset, mask, val);
	if (ret) {
		dev_err(dev, "failed to set pcie mode\n");
		return ret;
	}

	return 0;
}

static int ks_pcie_am654_set_mode(struct device *dev,
				  enum dw_pcie_device_mode mode)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int offset = 0;
	struct regmap *syscon;
	u32 val;
	u32 mask;
	int ret = 0;

	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
	if (IS_ERR(syscon))
		return 0;

	/* Do not error out to maintain old DT compatibility */
	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
	if (!ret)
		offset = args.args[0];

	mask = AM654_PCIE_DEV_TYPE_MASK;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		val = RC;
		break;
	case DW_PCIE_EP_TYPE:
		val = EP;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
		return -EINVAL;
	}

	ret = regmap_update_bits(syscon, offset, mask, val);
	if (ret) {
		dev_err(dev, "failed to set pcie mode\n");
		return ret;
	}

	return 0;
}

static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
	.host_ops = &ks_pcie_host_ops,
	.version = DW_PCIE_VER_365A,
};

static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
	.host_ops = &ks_pcie_am654_host_ops,
	.mode = DW_PCIE_RC_TYPE,
	.version = DW_PCIE_VER_490A,
};

static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
	.ep_ops = &ks_pcie_am654_ep_ops,
	.mode = DW_PCIE_EP_TYPE,
	.version = DW_PCIE_VER_490A,
};

static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.data = &ks_pcie_rc_of_data,
		.compatible = "ti,keystone-pcie",
	},
	{
		.data = &ks_pcie_am654_rc_of_data,
		.compatible = "ti,am654-pcie-rc",
	},
	{
		.data = &ks_pcie_am654_ep_of_data,
		.compatible = "ti,am654-pcie-ep",
	},
	{ },
};

static int __init ks_pcie_probe(struct platform_device *pdev)
{
	const struct dw_pcie_host_ops *host_ops;
	const struct dw_pcie_ep_ops *ep_ops;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct ks_pcie_of_data *data;
	enum dw_pcie_device_mode mode;
	struct dw_pcie *pci;
	struct keystone_pcie *ks_pcie;
	struct device_link **link;
	struct gpio_desc *gpiod;
	struct resource *res;
	void __iomem *base;
	u32 num_viewport;
	struct phy **phy;
	u32 num_lanes;
	char name[10];
	u32 version;
	int ret;
	int irq;
	int i;

	data = of_device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	version = data->version;
	host_ops = data->host_ops;
	ep_ops = data->ep_ops;
	mode = data->mode;

	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
	if (!ks_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	ks_pcie->app = *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
	base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
		ks_pcie->is_am6 = true;

	pci->dbi_base = base;
	pci->dbi_base2 = base;
	pci->dev = dev;
	pci->ops = &ks_pcie_dw_pcie_ops;
	pci->version = version;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
			  "ks-pcie-error-irq", ks_pcie);
	if (ret < 0) {
		dev_err(dev, "failed to request error IRQ %d\n",
			irq);
		return ret;
	}

	ret = of_property_read_u32(np, "num-lanes", &num_lanes);
	if (ret)
		num_lanes = 1;

	phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < num_lanes; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_optional_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_link;
		}

		if (!phy[i])
			continue;

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	ks_pcie->np = np;
	ks_pcie->pci = pci;
	ks_pcie->phy = phy;
	ks_pcie->link = link;
	ks_pcie->num_lanes = num_lanes;

	gpiod = devm_gpiod_get_optional(dev, "reset",
					GPIOD_OUT_LOW);
	if (IS_ERR(gpiod)) {
		ret = PTR_ERR(gpiod);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get reset GPIO\n");
		goto err_link;
	}

	ret = ks_pcie_enable_phy(ks_pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		goto err_link;
	}

	platform_set_drvdata(pdev, ks_pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	if (dw_pcie_ver_is_ge(pci, 480A))
		ret = ks_pcie_am654_set_mode(dev, mode);
	else
		ret = ks_pcie_set_mode(dev);
	if (ret < 0)
		goto err_get_sync;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		ret = of_property_read_u32(np, "num-viewport", &num_viewport);
		if (ret < 0) {
			dev_err(dev, "unable to read *num-viewport* property\n");
			goto err_get_sync;
		}

		/*
		 * "Power Sequencing and Reset Signal Timings" table in
		 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
		 * indicates PERST# should be deasserted after minimum of 100us
		 * once REFCLK is stable. The REFCLK to the connector in RC
		 * mode is selected while enabling the PHY. So deassert PERST#
		 * after 100 us.
		 */
		if (gpiod) {
			usleep_range(100, 200);
			gpiod_set_value_cansleep(gpiod, 1);
		}

		ks_pcie->num_viewport = num_viewport;
		pci->pp.ops = host_ops;
		ret = dw_pcie_host_init(&pci->pp);
		if (ret < 0)
			goto err_get_sync;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		pci->ep.ops = ep_ops;
		ret = dw_pcie_ep_init(&pci->ep);
		if (ret < 0)
			goto err_get_sync;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}

	ks_pcie_enable_error_irq(ks_pcie);

	return 0;

err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);

err_link:
	while (--i >= 0 && link[i])
		device_link_del(link[i]);

	return ret;
}

static int __exit ks_pcie_remove(struct platform_device *pdev)
{
	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
	struct device_link **link = ks_pcie->link;
	int num_lanes = ks_pcie->num_lanes;
	struct device *dev = &pdev->dev;

	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);
	while (num_lanes--)
		device_link_del(link[num_lanes]);

	return 0;
}

static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.of_match_table = ks_pcie_of_match,
	},
};
builtin_platform_driver(ks_pcie_driver);