// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip AXI PCIe Bridge host controller driver
 *
 * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
 */
#include <linux/clk.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>

#include "../pci.h"
/* Number of MSI IRQs */
#define MC_NUM_MSI_IRQS 32
#define MC_NUM_MSI_IRQS_CODED 5

/* PCIe Bridge Phy and Controller Phy offsets */
#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
#define MC_PCIE1_CTRL_ADDR 0x0000a000u

#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR)
#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR)
/* PCIe Controller Phy Regs */
#define SEC_ERROR_CNT 0x20
#define DED_ERROR_CNT 0x24
#define SEC_ERROR_INT 0x28
#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
#define NUM_SEC_ERROR_INTS (4)
#define SEC_ERROR_INT_MASK 0x2c
#define DED_ERROR_INT 0x30
#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
#define NUM_DED_ERROR_INTS (4)
#define DED_ERROR_INT_MASK 0x34
#define ECC_CONTROL 0x38
#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
#define LTSSM_STATE 0x5c
#define LTSSM_L0_STATE 0x10
#define PCIE_EVENT_INT 0x14c
#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
#define PCIE_EVENT_INT_ENB_SHIFT 16
#define NUM_PCIE_EVENTS (3)
/* PCIe Bridge Phy Regs */
#define PCIE_PCI_IDS_DW1 0x9c

/* PCIe Config space MSI capability structure */
#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
#define MC_MSI_MAX_Q_AVAIL (MC_NUM_MSI_IRQS_CODED << 1)
#define MC_MSI_Q_SIZE (MC_NUM_MSI_IRQS_CODED << 4)

#define IMASK_LOCAL 0x180
#define DMA_END_ENGINE_0_MASK 0x00000000u
#define DMA_END_ENGINE_0_SHIFT 0
#define DMA_END_ENGINE_1_MASK 0x00000000u
#define DMA_END_ENGINE_1_SHIFT 1
#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
#define DMA_ERROR_ENGINE_0_SHIFT 8
#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
#define DMA_ERROR_ENGINE_1_SHIFT 9
#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
#define A_ATR_EVT_POST_ERR_SHIFT 16
#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
#define A_ATR_EVT_FETCH_ERR_SHIFT 17
#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
#define A_ATR_EVT_DOORBELL_SHIFT 19
#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
#define P_ATR_EVT_POST_ERR_SHIFT 20
#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
#define P_ATR_EVT_FETCH_ERR_SHIFT 21
#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
#define P_ATR_EVT_DOORBELL_SHIFT 23
#define PM_MSI_INT_INTA_MASK 0x01000000u
#define PM_MSI_INT_INTA_SHIFT 24
#define PM_MSI_INT_INTB_MASK 0x02000000u
#define PM_MSI_INT_INTB_SHIFT 25
#define PM_MSI_INT_INTC_MASK 0x04000000u
#define PM_MSI_INT_INTC_SHIFT 26
#define PM_MSI_INT_INTD_MASK 0x08000000u
#define PM_MSI_INT_INTD_SHIFT 27
#define PM_MSI_INT_INTX_MASK 0x0f000000u
#define PM_MSI_INT_INTX_SHIFT 24
#define PM_MSI_INT_MSI_MASK 0x10000000u
#define PM_MSI_INT_MSI_SHIFT 28
#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
#define PM_MSI_INT_AER_EVT_SHIFT 29
#define PM_MSI_INT_EVENTS_MASK 0x40000000u
#define PM_MSI_INT_EVENTS_SHIFT 30
#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
#define PM_MSI_INT_SYS_ERR_SHIFT 31
#define NUM_LOCAL_EVENTS 15
#define ISTATUS_LOCAL 0x184
#define IMASK_HOST 0x188
#define ISTATUS_HOST 0x18c
#define MSI_ADDR 0x190
#define ISTATUS_MSI 0x194
/* PCIe Master table init defines */
#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
#define ATR0_PCIE_ATR_SIZE 0x25
#define ATR0_PCIE_ATR_SIZE_SHIFT 1
#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u

/* PCIe AXI slave table init defines */
#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
#define ATR_SIZE_SHIFT 1
#define ATR_IMPL_ENABLE 1
#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
#define PCIE_TX_RX_INTERFACE 0x00000000u
#define PCIE_CONFIG_INTERFACE 0x00000001u

#define ATR_ENTRY_SIZE 32

#define EVENT_PCIE_L2_EXIT 0
#define EVENT_PCIE_HOTRST_EXIT 1
#define EVENT_PCIE_DLUP_EXIT 2
#define EVENT_SEC_TX_RAM_SEC_ERR 3
#define EVENT_SEC_RX_RAM_SEC_ERR 4
#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 5
#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 6
#define EVENT_DED_TX_RAM_DED_ERR 7
#define EVENT_DED_RX_RAM_DED_ERR 8
#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 9
#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 10
#define EVENT_LOCAL_DMA_END_ENGINE_0 11
#define EVENT_LOCAL_DMA_END_ENGINE_1 12
#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
#define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15
#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16
#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17
#define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18
#define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19
#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20
#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21
#define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22
#define EVENT_LOCAL_PM_MSI_INT_INTX 23
#define EVENT_LOCAL_PM_MSI_INT_MSI 24
#define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25
#define EVENT_LOCAL_PM_MSI_INT_EVENTS 26
#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27
#define NUM_EVENTS 28
#define PCIE_EVENT_CAUSE(x, s) \
	[EVENT_PCIE_ ## x] = { __stringify(x), s }

#define SEC_ERROR_CAUSE(x, s) \
	[EVENT_SEC_ ## x] = { __stringify(x), s }

#define DED_ERROR_CAUSE(x, s) \
	[EVENT_DED_ ## x] = { __stringify(x), s }

#define LOCAL_EVENT_CAUSE(x, s) \
	[EVENT_LOCAL_ ## x] = { __stringify(x), s }
#define PCIE_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = PCIE_EVENT_INT, \
	.mask_offset = PCIE_EVENT_INT, \
	.mask_high = 1, \
	.mask = PCIE_EVENT_INT_ ## x ## _INT, \
	.enb_mask = PCIE_EVENT_INT_ENB_MASK

#define SEC_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = SEC_ERROR_INT, \
	.mask_offset = SEC_ERROR_INT_MASK, \
	.mask = SEC_ERROR_INT_ ## x ## _INT, \
	.mask_high = 1, \
	.enb_mask = 0

#define DED_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = DED_ERROR_INT, \
	.mask_offset = DED_ERROR_INT_MASK, \
	.mask_high = 1, \
	.mask = DED_ERROR_INT_ ## x ## _INT, \
	.enb_mask = 0

#define LOCAL_EVENT(x) \
	.base = MC_PCIE_BRIDGE_ADDR, \
	.offset = ISTATUS_LOCAL, \
	.mask_offset = IMASK_LOCAL, \
	.mask_high = 0, \
	.mask = x ## _MASK, \
	.enb_mask = 0
#define PCIE_EVENT_TO_EVENT_MAP(x) \
	{ PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }

#define SEC_ERROR_TO_EVENT_MAP(x) \
	{ SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }

#define DED_ERROR_TO_EVENT_MAP(x) \
	{ DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }

#define LOCAL_STATUS_TO_EVENT_MAP(x) \
	{ x ## _MASK, EVENT_LOCAL_ ## x }
struct event_map {
	u32 reg_mask;
	u32 event_bit;
};

struct mc_msi {
	struct mutex lock;		/* Protect used bitmap */
	struct irq_domain *msi_domain;
	struct irq_domain *dev_domain;
	u32 num_vectors;
	u64 vector_phy;
	DECLARE_BITMAP(used, MC_NUM_MSI_IRQS);
};

struct mc_pcie {
	void __iomem *axi_base_addr;
	struct device *dev;
	struct irq_domain *intx_domain;
	struct irq_domain *event_domain;
	raw_spinlock_t lock;
	struct mc_msi msi;
};

struct cause {
	const char *sym;
	const char *str;
};
static const struct cause event_cause[NUM_EVENTS] = {
	PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
	PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
	PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
	SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
	SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
	SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
	SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
	DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
	DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
	DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
	DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
	LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
	LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
};
static struct event_map pcie_event_to_event[] = {
	PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
	PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
	PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
};

static struct event_map sec_error_to_event[] = {
	SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
};

static struct event_map ded_error_to_event[] = {
	DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
};

static struct event_map local_status_to_event[] = {
	LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
};
static struct {
	u32 base;
	u32 offset;
	u32 mask;
	u32 shift;
	u32 enb_mask;
	u32 mask_high;
	u32 mask_offset;
} event_descs[] = {
	{ PCIE_EVENT(L2_EXIT) },
	{ PCIE_EVENT(HOTRST_EXIT) },
	{ PCIE_EVENT(DLUP_EXIT) },
	{ SEC_EVENT(TX_RAM_SEC_ERR) },
	{ SEC_EVENT(RX_RAM_SEC_ERR) },
	{ SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
	{ SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
	{ DED_EVENT(TX_RAM_DED_ERR) },
	{ DED_EVENT(RX_RAM_DED_ERR) },
	{ DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
	{ DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
	{ LOCAL_EVENT(DMA_END_ENGINE_0) },
	{ LOCAL_EVENT(DMA_END_ENGINE_1) },
	{ LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
	{ LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
	{ LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
	{ LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
	{ LOCAL_EVENT(PM_MSI_INT_INTX) },
	{ LOCAL_EVENT(PM_MSI_INT_MSI) },
	{ LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
	{ LOCAL_EVENT(PM_MSI_INT_EVENTS) },
	{ LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
};
static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
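/*
 * Program the root port's MSI capability: enable MSI, advertise the number
 * of vectors the bridge decodes, and point the message address at the
 * bridge's MSI decode register (msi->vector_phy).
 */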
static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *base)
{
	struct mc_msi *msi = &port->msi;
	u32 cap_offset = MC_MSI_CAP_CTRL_OFFSET;
	u16 msg_ctrl = readw_relaxed(base + cap_offset + PCI_MSI_FLAGS);

	msg_ctrl |= PCI_MSI_FLAGS_ENABLE;
	msg_ctrl &= ~PCI_MSI_FLAGS_QMASK;
	msg_ctrl |= MC_MSI_MAX_Q_AVAIL;
	msg_ctrl &= ~PCI_MSI_FLAGS_QSIZE;
	msg_ctrl |= MC_MSI_Q_SIZE;
	msg_ctrl |= PCI_MSI_FLAGS_64BIT;

	writew_relaxed(msg_ctrl, base + cap_offset + PCI_MSI_FLAGS);

	writel_relaxed(lower_32_bits(msi->vector_phy),
		       base + cap_offset + PCI_MSI_ADDRESS_LO);
	writel_relaxed(upper_32_bits(msi->vector_phy),
		       base + cap_offset + PCI_MSI_ADDRESS_HI);
}
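/*
 * Chained handler for the MSI group interrupt: acknowledge the group bit in
 * ISTATUS_LOCAL, then demultiplex ISTATUS_MSI and hand each pending vector
 * to the inner MSI IRQ domain.
 */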
static void mc_handle_msi(struct irq_desc *desc)
{
	struct mc_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct mc_msi *msi = &port->msi;
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_MSI_MASK) {
		writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
		status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
		for_each_set_bit(bit, &status, msi->num_vectors) {
			ret = generic_handle_domain_irq(msi->dev_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}
static void mc_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	u32 bitpos = data->hwirq;

	writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
}
static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = port->msi.vector_phy;

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}
static int mc_msi_set_affinity(struct irq_data *irq_data,
			       const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
static struct irq_chip mc_msi_bottom_irq_chip = {
	.name = "Microchip MSI",
	.irq_ack = mc_msi_bottom_irq_ack,
	.irq_compose_msi_msg = mc_compose_msi_msg,
	.irq_set_affinity = mc_msi_set_affinity,
};
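/*
 * Bottom-level MSI domain ops: allocate a free hardware vector from the
 * used bitmap under msi->lock, and make sure the MSI group interrupt is
 * enabled in IMASK_LOCAL.
 */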
static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct mc_pcie *port = domain->host_data;
	struct mc_msi *msi = &port->msi;
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long bit;
	u32 val;

	mutex_lock(&msi->lock);
	bit = find_first_zero_bit(msi->used, msi->num_vectors);
	if (bit >= msi->num_vectors) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->used);

	irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq, NULL, NULL);

	/* Enable MSI interrupts */
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= PM_MSI_INT_MSI_MASK;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);

	mutex_unlock(&msi->lock);

	return 0;
}
static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mc_pcie *port = irq_data_get_irq_chip_data(d);
	struct mc_msi *msi = &port->msi;

	mutex_lock(&msi->lock);

	if (test_bit(d->hwirq, msi->used))
		__clear_bit(d->hwirq, msi->used);
	else
		dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);

	mutex_unlock(&msi->lock);
}
static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= mc_irq_msi_domain_alloc,
	.free	= mc_irq_msi_domain_free,
};
static struct irq_chip mc_msi_irq_chip = {
	.name = "Microchip PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
static struct msi_domain_info mc_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &mc_msi_irq_chip,
};
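/*
 * Create the two-level MSI hierarchy: a linear inner domain handing out
 * hardware vectors, with a PCI MSI domain layered on top of it.
 */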
static int mc_allocate_msi_domains(struct mc_pcie *port)
{
	struct device *dev = port->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct mc_msi *msi = &port->msi;

	mutex_init(&port->msi.lock);

	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
						&msi_domain_ops, port);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}
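/*
 * Chained handler for the INTx group interrupt: extract the INTA-INTD bits
 * from ISTATUS_LOCAL and hand each one to the INTx IRQ domain.
 */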
static void mc_handle_intx(struct irq_desc *desc)
{
	struct mc_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_INTX_MASK) {
		status &= PM_MSI_INT_INTX_MASK;
		status >>= PM_MSI_INT_INTX_SHIFT;
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			ret = generic_handle_domain_irq(port->intx_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}
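/*
 * INTx irqchip callbacks: INTA-INTD status lives in ISTATUS_LOCAL and the
 * enables in IMASK_LOCAL at the same bit positions, so mask/unmask are
 * read-modify-write sequences that must run under port->lock.
 */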
static void mc_ack_intx_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);

	writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
}
static void mc_mask_intx_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}
static void mc_unmask_intx_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}
static struct irq_chip mc_intx_irq_chip = {
	.name = "Microchip PCIe INTx",
	.irq_ack = mc_ack_intx_irq,
	.irq_mask = mc_mask_intx_irq,
	.irq_unmask = mc_unmask_intx_irq,
};
static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mc_pcie_intx_map,
};
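/*
 * Translate a raw status register into the driver's event-bit numbering
 * using the reg_mask/event_bit pairs in the event_map tables above.
 */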
static inline u32 reg_to_event(u32 reg, struct event_map field)
{
	return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
}
static u32 pcie_events(void __iomem *addr)
{
	u32 reg = readl_relaxed(addr);
	u32 val = 0;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
		val |= reg_to_event(reg, pcie_event_to_event[i]);

	return val;
}

static u32 sec_errors(void __iomem *addr)
{
	u32 reg = readl_relaxed(addr);
	u32 val = 0;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
		val |= reg_to_event(reg, sec_error_to_event[i]);

	return val;
}

static u32 ded_errors(void __iomem *addr)
{
	u32 reg = readl_relaxed(addr);
	u32 val = 0;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
		val |= reg_to_event(reg, ded_error_to_event[i]);

	return val;
}

static u32 local_events(void __iomem *addr)
{
	u32 reg = readl_relaxed(addr);
	u32 val = 0;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
		val |= reg_to_event(reg, local_status_to_event[i]);

	return val;
}
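/*
 * Gather all pending event sources (PCIe link events, SEC/DED ECC errors
 * and local bridge status) into a single NUM_EVENTS-wide bitmap.
 */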
static u32 get_events(struct mc_pcie *port)
{
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
	u32 events = 0;

	events |= pcie_events(ctrl_base_addr + PCIE_EVENT_INT);
	events |= sec_errors(ctrl_base_addr + SEC_ERROR_INT);
	events |= ded_errors(ctrl_base_addr + DED_ERROR_INT);
	events |= local_events(bridge_base_addr + ISTATUS_LOCAL);

	return events;
}
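/*
 * Per-event handler: only logs the cause; acknowledging the hardware status
 * is left to the event irqchip's .irq_ack callback.
 */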
static irqreturn_t mc_event_handler(int irq, void *dev_id)
{
	struct mc_pcie *port = dev_id;
	struct device *dev = port->dev;
	struct irq_data *data;

	data = irq_domain_get_irq_data(port->event_domain, irq);

	if (event_cause[data->hwirq].str)
		dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
	else
		dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);

	return IRQ_HANDLED;
}
static void mc_handle_event(struct irq_desc *desc)
{
	struct mc_pcie *port = irq_desc_get_handler_data(desc);
	unsigned long events;
	u32 bit;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	events = get_events(port);

	for_each_set_bit(bit, &events, NUM_EVENTS)
		generic_handle_domain_irq(port->event_domain, bit);

	chained_irq_exit(chip, desc);
}
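/*
 * Event irqchip callbacks. The register layouts differ per source:
 * PCIE_EVENT_INT keeps its enable bits in the upper half of the same
 * register (enb_mask), and the mask_high flag in event_descs selects the
 * polarity - for SEC/DED/PCIE registers a set bit masks the event, while
 * IMASK_LOCAL is set-to-enable.
 */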
static void mc_ack_event_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;

	addr = port->axi_base_addr + event_descs[event].base +
		event_descs[event].offset;
	mask = event_descs[event].mask;
	mask |= event_descs[event].enb_mask;

	writel_relaxed(mask, addr);
}
static void mc_mask_event_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;
	u32 val;

	addr = port->axi_base_addr + event_descs[event].base +
		event_descs[event].mask_offset;
	mask = event_descs[event].mask;
	if (event_descs[event].enb_mask) {
		mask <<= PCIE_EVENT_INT_ENB_SHIFT;
		mask &= PCIE_EVENT_INT_ENB_MASK;
	}

	if (!event_descs[event].mask_high)
		mask = ~mask;

	raw_spin_lock(&port->lock);
	val = readl_relaxed(addr);
	if (event_descs[event].mask_high)
		val |= mask;
	else
		val &= mask;

	writel_relaxed(val, addr);
	raw_spin_unlock(&port->lock);
}
static void mc_unmask_event_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;
	u32 val;

	addr = port->axi_base_addr + event_descs[event].base +
		event_descs[event].mask_offset;
	mask = event_descs[event].mask;

	if (event_descs[event].enb_mask)
		mask <<= PCIE_EVENT_INT_ENB_SHIFT;

	if (event_descs[event].mask_high)
		mask = ~mask;

	if (event_descs[event].enb_mask)
		mask &= PCIE_EVENT_INT_ENB_MASK;

	raw_spin_lock(&port->lock);
	val = readl_relaxed(addr);
	if (event_descs[event].mask_high)
		val &= mask;
	else
		val |= mask;

	writel_relaxed(val, addr);
	raw_spin_unlock(&port->lock);
}
static struct irq_chip mc_event_irq_chip = {
	.name = "Microchip PCIe EVENT",
	.irq_ack = mc_ack_event_irq,
	.irq_mask = mc_mask_event_irq,
	.irq_unmask = mc_unmask_event_irq,
};
static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = mc_pcie_event_map,
};
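/*
 * Optionally get, prepare and enable one named fabric interface clock; a
 * devm action disables it again when the device is unbound.
 */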
static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get_optional(dev, id);
	if (IS_ERR(clk))
		return clk;
	if (!clk)
		return clk;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ERR_PTR(ret);

	devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare,
				 clk);

	return clk;
}
static int mc_pcie_init_clks(struct device *dev)
{
	struct clk *fic;
	int i;

	/*
	 * PCIe may be clocked via Fabric Interface using between 1 and 4
	 * clocks. Scan DT for clocks and enable them if present
	 */
	for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
		fic = mc_pcie_init_clk(dev, poss_clks[i]);
		if (IS_ERR(fic))
			return PTR_ERR(fic);
	}

	return 0;
}
static int mc_pcie_init_irq_domains(struct mc_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "failed to find PCIe Intc node\n");
		return -EINVAL;
	}

	port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
						   &event_domain_ops, port);
	if (!port->event_domain) {
		dev_err(dev, "failed to get event domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "failed to get an INTx IRQ domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return mc_allocate_msi_domains(port);
}
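/*
 * Program one entry of the bridge's outbound address translation table:
 * map a power-of-two sized AXI window onto a PCIe address. Entry 0 is
 * reserved for config (ECAM) accesses; every other entry targets the
 * TX/RX (memory) interface.
 */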
static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
				 phys_addr_t axi_addr, phys_addr_t pci_addr,
				 size_t size)
{
	u32 atr_sz = ilog2(size) - 1;
	u32 val;

	if (index == 0)
		val = PCIE_CONFIG_INTERFACE;
	else
		val = PCIE_TX_RX_INTERFACE;

	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_PARAM);

	val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
	      ATR_IMPL_ENABLE;
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRCADDR_PARAM);

	val = upper_32_bits(axi_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRC_ADDR);

	val = lower_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_LSB);

	val = upper_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_UDW);

	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}
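/*
 * Walk the host bridge's DT-provided windows and set up one outbound
 * translation entry per MEM resource, starting at index 1 (index 0 is
 * used for config space).
 */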
static int mc_pcie_setup_windows(struct platform_device *pdev,
				 struct mc_pcie *port)
{
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
	struct resource_entry *entry;
	u64 pci_addr;
	u32 index = 1;

	resource_list_for_each_entry(entry, &bridge->windows) {
		if (resource_type(entry->res) == IORESOURCE_MEM) {
			pci_addr = entry->res->start - entry->offset;
			mc_pcie_setup_window(bridge_base_addr, index,
					     entry->res->start, pci_addr,
					     resource_size(entry->res));
			index++;
		}
	}

	return 0;
}
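/*
 * ECAM .init hook: map the bridge/controller registers, create the IRQ
 * domains, wire up the chained handlers, enable MSI and INTx, clear and
 * unmask the error/event interrupts, then program the outbound windows.
 */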
static int mc_platform_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct platform_device *pdev = to_platform_device(dev);
	struct mc_pcie *port;
	void __iomem *bridge_base_addr;
	void __iomem *ctrl_base_addr;
	int ret;
	int irq;
	int i, intx_irq, msi_irq, event_irq;
	u32 val;
	int err;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = dev;

	ret = mc_pcie_init_clks(dev);
	if (ret) {
		dev_err(dev, "failed to get clock resources, error %d\n", ret);
		return -ENODEV;
	}

	port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(port->axi_base_addr))
		return PTR_ERR(port->axi_base_addr);

	bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;

	port->msi.vector_phy = MSI_ADDR;
	port->msi.num_vectors = MC_NUM_MSI_IRQS;
	ret = mc_pcie_init_irq_domains(port);
	if (ret) {
		dev_err(dev, "failed creating IRQ domains\n");
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;
	for (i = 0; i < NUM_EVENTS; i++) {
		event_irq = irq_create_mapping(port->event_domain, i);
		if (!event_irq) {
			dev_err(dev, "failed to map hwirq %d\n", i);
			return -ENXIO;
		}

		err = devm_request_irq(dev, event_irq, mc_event_handler,
				       0, event_cause[i].sym, port);
		if (err) {
			dev_err(dev, "failed to request IRQ %d\n", event_irq);
			return err;
		}
	}

	intx_irq = irq_create_mapping(port->event_domain,
				      EVENT_LOCAL_PM_MSI_INT_INTX);
	if (!intx_irq) {
		dev_err(dev, "failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);

	msi_irq = irq_create_mapping(port->event_domain,
				     EVENT_LOCAL_PM_MSI_INT_MSI);
	if (!msi_irq)
		return -ENXIO;

	/* Plug the MSI chained handler */
	irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(irq, mc_handle_event, port);

	/* Hardware doesn't setup MSI by default */
	mc_pcie_enable_msi(port, cfg->win);
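	/*
	 * Below: unmask INTx, write-1-to-clear (presumably) any latched
	 * event and ECC error status, zero the SEC/DED counters, and leave
	 * the error interrupts unmasked (mask registers written to 0).
	 */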
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= PM_MSI_INT_INTX_MASK;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);

	writel_relaxed(val, ctrl_base_addr + ECC_CONTROL);

	val = PCIE_EVENT_INT_L2_EXIT_INT |
	      PCIE_EVENT_INT_HOTRST_EXIT_INT |
	      PCIE_EVENT_INT_DLUP_EXIT_INT;
	writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT);

	val = SEC_ERROR_INT_TX_RAM_SEC_ERR_INT |
	      SEC_ERROR_INT_RX_RAM_SEC_ERR_INT |
	      SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT |
	      SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT;
	writel_relaxed(val, ctrl_base_addr + SEC_ERROR_INT);
	writel_relaxed(0, ctrl_base_addr + SEC_ERROR_INT_MASK);
	writel_relaxed(0, ctrl_base_addr + SEC_ERROR_CNT);

	val = DED_ERROR_INT_TX_RAM_DED_ERR_INT |
	      DED_ERROR_INT_RX_RAM_DED_ERR_INT |
	      DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT |
	      DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT;
	writel_relaxed(val, ctrl_base_addr + DED_ERROR_INT);
	writel_relaxed(0, ctrl_base_addr + DED_ERROR_INT_MASK);
	writel_relaxed(0, ctrl_base_addr + DED_ERROR_CNT);

	writel_relaxed(0, bridge_base_addr + IMASK_HOST);
	writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);

	/* Configure Address Translation Table 0 for PCIe config space */
	mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start & 0xffffffff,
			     cfg->res.start, resource_size(&cfg->res));

	return mc_pcie_setup_windows(pdev, port);
}
static const struct pci_ecam_ops mc_ecam_ops = {
	.init = mc_platform_init,
	.pci_ops = {
		.map_bus = pci_ecam_map_bus,
		.read = pci_generic_config_read,
		.write = pci_generic_config_write,
	}
};
static const struct of_device_id mc_pcie_of_match[] = {
	{
		.compatible = "microchip,pcie-host-1.0",
		.data = &mc_ecam_ops,
	},
	{},
};
MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
static struct platform_driver mc_pcie_driver = {
	.probe = pci_host_common_probe,
	.driver = {
		.name = "microchip-pcie",
		.of_match_table = mc_pcie_of_match,
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver(mc_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microchip PCIe host controller driver");