// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip AXI PCIe Bridge host controller driver
 *
 * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>

/* Number of MSI IRQs */
#define MC_MAX_NUM_MSI_IRQS 32

/* PCIe Bridge Phy and Controller Phy offsets */
#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
#define MC_PCIE1_CTRL_ADDR 0x0000a000u

#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR)
#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR)

/* PCIe Bridge Phy Regs */
#define PCIE_PCI_IRQ_DW0 0xa8
#define MSIX_CAP_MASK BIT(31)
#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
#define NUM_MSI_MSGS_SHIFT 4

#define IMASK_LOCAL 0x180
#define DMA_END_ENGINE_0_MASK 0x00000000u
#define DMA_END_ENGINE_0_SHIFT 0
#define DMA_END_ENGINE_1_MASK 0x00000000u
#define DMA_END_ENGINE_1_SHIFT 1
#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
#define DMA_ERROR_ENGINE_0_SHIFT 8
#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
#define DMA_ERROR_ENGINE_1_SHIFT 9
#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
#define A_ATR_EVT_POST_ERR_SHIFT 16
#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
#define A_ATR_EVT_FETCH_ERR_SHIFT 17
#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
#define A_ATR_EVT_DOORBELL_SHIFT 19
#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
#define P_ATR_EVT_POST_ERR_SHIFT 20
#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
#define P_ATR_EVT_FETCH_ERR_SHIFT 21
#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
#define P_ATR_EVT_DOORBELL_SHIFT 23
#define PM_MSI_INT_INTA_MASK 0x01000000u
#define PM_MSI_INT_INTA_SHIFT 24
#define PM_MSI_INT_INTB_MASK 0x02000000u
#define PM_MSI_INT_INTB_SHIFT 25
#define PM_MSI_INT_INTC_MASK 0x04000000u
#define PM_MSI_INT_INTC_SHIFT 26
#define PM_MSI_INT_INTD_MASK 0x08000000u
#define PM_MSI_INT_INTD_SHIFT 27
#define PM_MSI_INT_INTX_MASK 0x0f000000u
#define PM_MSI_INT_INTX_SHIFT 24
#define PM_MSI_INT_MSI_MASK 0x10000000u
#define PM_MSI_INT_MSI_SHIFT 28
#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
#define PM_MSI_INT_AER_EVT_SHIFT 29
#define PM_MSI_INT_EVENTS_MASK 0x40000000u
#define PM_MSI_INT_EVENTS_SHIFT 30
#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
#define PM_MSI_INT_SYS_ERR_SHIFT 31
#define NUM_LOCAL_EVENTS 15
#define ISTATUS_LOCAL 0x184
#define IMASK_HOST 0x188
#define ISTATUS_HOST 0x18c
#define IMSI_ADDR 0x190
#define ISTATUS_MSI 0x194

/* PCIe Master table init defines */
#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
#define ATR0_PCIE_ATR_SIZE 0x25
#define ATR0_PCIE_ATR_SIZE_SHIFT 1
#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u

/* PCIe AXI slave table init defines */
#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
#define ATR_SIZE_SHIFT 1
#define ATR_IMPL_ENABLE 1
#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
#define PCIE_TX_RX_INTERFACE 0x00000000u
#define PCIE_CONFIG_INTERFACE 0x00000001u

#define ATR_ENTRY_SIZE 32

/* PCIe Controller Phy Regs */
#define SEC_ERROR_EVENT_CNT 0x20
#define DED_ERROR_EVENT_CNT 0x24
#define SEC_ERROR_INT 0x28
#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
#define SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT GENMASK(15, 0)
#define NUM_SEC_ERROR_INTS (4)
#define SEC_ERROR_INT_MASK 0x2c
#define DED_ERROR_INT 0x30
#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
#define DED_ERROR_INT_ALL_RAM_DED_ERR_INT GENMASK(15, 0)
#define NUM_DED_ERROR_INTS (4)
#define DED_ERROR_INT_MASK 0x34
#define ECC_CONTROL 0x38
#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
#define PCIE_EVENT_INT 0x14c
#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
#define PCIE_EVENT_INT_ENB_SHIFT 16
#define NUM_PCIE_EVENTS (3)

/* PCIe Config space MSI capability structure */
#define MC_MSI_CAP_CTRL_OFFSET 0xe0u

/* Events */
#define EVENT_PCIE_L2_EXIT 0
#define EVENT_PCIE_HOTRST_EXIT 1
#define EVENT_PCIE_DLUP_EXIT 2
#define EVENT_SEC_TX_RAM_SEC_ERR 3
#define EVENT_SEC_RX_RAM_SEC_ERR 4
#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 5
#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 6
#define EVENT_DED_TX_RAM_DED_ERR 7
#define EVENT_DED_RX_RAM_DED_ERR 8
#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 9
#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 10
#define EVENT_LOCAL_DMA_END_ENGINE_0 11
#define EVENT_LOCAL_DMA_END_ENGINE_1 12
#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
#define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15
#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16
#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17
#define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18
#define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19
#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20
#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21
#define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22
#define EVENT_LOCAL_PM_MSI_INT_INTX 23
#define EVENT_LOCAL_PM_MSI_INT_MSI 24
#define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25
#define EVENT_LOCAL_PM_MSI_INT_EVENTS 26
#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27
#define NUM_EVENTS 28
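
/*
 * Events 0-2 are collected from PCIE_EVENT_INT, 3-6 from SEC_ERROR_INT,
 * 7-10 from DED_ERROR_INT and 11-27 from ISTATUS_LOCAL; get_events() below
 * folds all four status registers into this single linear hwirq space so
 * that one IRQ domain can cover every event source.
 */
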
#define PCIE_EVENT_CAUSE(x, s) \
	[EVENT_PCIE_ ## x] = { __stringify(x), s }

#define SEC_ERROR_CAUSE(x, s) \
	[EVENT_SEC_ ## x] = { __stringify(x), s }

#define DED_ERROR_CAUSE(x, s) \
	[EVENT_DED_ ## x] = { __stringify(x), s }

#define LOCAL_EVENT_CAUSE(x, s) \
	[EVENT_LOCAL_ ## x] = { __stringify(x), s }

#define PCIE_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = PCIE_EVENT_INT, \
	.mask_offset = PCIE_EVENT_INT, \
	.mask_high = 1, \
	.mask = PCIE_EVENT_INT_ ## x ## _INT, \
	.enb_mask = PCIE_EVENT_INT_ENB_MASK

#define SEC_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = SEC_ERROR_INT, \
	.mask_offset = SEC_ERROR_INT_MASK, \
	.mask = SEC_ERROR_INT_ ## x ## _INT, \
	.mask_high = 1, \
	.enb_mask = 0

#define DED_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = DED_ERROR_INT, \
	.mask_offset = DED_ERROR_INT_MASK, \
	.mask_high = 1, \
	.mask = DED_ERROR_INT_ ## x ## _INT, \
	.enb_mask = 0

#define LOCAL_EVENT(x) \
	.base = MC_PCIE_BRIDGE_ADDR, \
	.offset = ISTATUS_LOCAL, \
	.mask_offset = IMASK_LOCAL, \
	.mask_high = 0, \
	.mask = x ## _MASK, \
	.enb_mask = 0

#define PCIE_EVENT_TO_EVENT_MAP(x) \
	{ PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }

#define SEC_ERROR_TO_EVENT_MAP(x) \
	{ SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }

#define DED_ERROR_TO_EVENT_MAP(x) \
	{ DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }

#define LOCAL_STATUS_TO_EVENT_MAP(x) \
	{ x ## _MASK, EVENT_LOCAL_ ## x }

struct event_map {
	u32 reg_mask;
	u32 event_bit;
};

struct mc_msi {
	struct mutex lock;		/* Protect used bitmap */
	struct irq_domain *msi_domain;
	struct irq_domain *dev_domain;
	u32 num_vectors;
	u64 vector_phy;
	DECLARE_BITMAP(used, MC_MAX_NUM_MSI_IRQS);
};

struct mc_pcie {
	void __iomem *axi_base_addr;
	struct device *dev;
	struct irq_domain *intx_domain;
	struct irq_domain *event_domain;
	raw_spinlock_t lock;
	struct mc_msi msi;
};

struct cause {
	const char *sym;
	const char *str;
};

static const struct cause event_cause[NUM_EVENTS] = {
	PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
	PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
	PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
	SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
	SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
	SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
	SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
	DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
	DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
	DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
	DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
	LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
	LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
};

static struct event_map pcie_event_to_event[] = {
	PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
	PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
	PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
};

static struct event_map sec_error_to_event[] = {
	SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
};

static struct event_map ded_error_to_event[] = {
	DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
};

static struct event_map local_status_to_event[] = {
	LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
};
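
/*
 * event_descs[] records, for each event, the register used to ack it
 * (offset), the register that masks it (mask_offset) and how its mask bits
 * behave (mask_high, enb_mask); the event irq_chip callbacks below index
 * this table by hwirq.
 */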
static struct {
	u32 base;
	u32 offset;
	u32 mask;
	u32 enb_mask;
	u32 mask_high;
	u32 mask_offset;
} event_descs[] = {
	{ PCIE_EVENT(L2_EXIT) },
	{ PCIE_EVENT(HOTRST_EXIT) },
	{ PCIE_EVENT(DLUP_EXIT) },
	{ SEC_EVENT(TX_RAM_SEC_ERR) },
	{ SEC_EVENT(RX_RAM_SEC_ERR) },
	{ SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
	{ SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
	{ DED_EVENT(TX_RAM_DED_ERR) },
	{ DED_EVENT(RX_RAM_DED_ERR) },
	{ DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
	{ DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
	{ LOCAL_EVENT(DMA_END_ENGINE_0) },
	{ LOCAL_EVENT(DMA_END_ENGINE_1) },
	{ LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
	{ LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
	{ LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
	{ LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
	{ LOCAL_EVENT(PM_MSI_INT_INTX) },
	{ LOCAL_EVENT(PM_MSI_INT_MSI) },
	{ LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
	{ LOCAL_EVENT(PM_MSI_INT_EVENTS) },
	{ LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
};

static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
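
/*
 * A single file-scope instance: mc_platform_init() is invoked through the
 * ECAM framework with only a struct pci_config_window, so probe state is
 * kept here rather than in device drvdata.
 */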
static struct mc_pcie *port;

static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
{
	struct mc_msi *msi = &port->msi;
	u16 reg;
	u8 queue_size;

	/* Fixup MSI enable flag */
	reg = readw_relaxed(ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
	reg |= PCI_MSI_FLAGS_ENABLE;
	writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);

	/* Fixup PCI MSI queue flags */
	queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg);
	reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size);
	writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);

	/* Fixup MSI addr fields */
	writel_relaxed(lower_32_bits(msi->vector_phy),
		       ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_LO);
	writel_relaxed(upper_32_bits(msi->vector_phy),
		       ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
}
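
/*
 * Chained handler for the aggregated MSI event: ack the PM_MSI_INT_MSI bit
 * in ISTATUS_LOCAL first, then demux each bit set in ISTATUS_MSI to its
 * per-vector interrupt in the MSI dev_domain.
 */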
static void mc_handle_msi(struct irq_desc *desc)
{
	struct mc_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct mc_msi *msi = &port->msi;
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_MSI_MASK) {
		writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
		status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
		for_each_set_bit(bit, &status, msi->num_vectors) {
			ret = generic_handle_domain_irq(msi->dev_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}
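
/*
 * The per-vector status bits in ISTATUS_MSI are write-1-to-clear; acking a
 * vector writes its bit back to the register.
 */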
static void mc_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	u32 bitpos = data->hwirq;

	writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
}

static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = port->msi.vector_phy;

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int mc_msi_set_affinity(struct irq_data *irq_data,
			       const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip mc_msi_bottom_irq_chip = {
	.name = "Microchip MSI",
	.irq_ack = mc_msi_bottom_irq_ack,
	.irq_compose_msi_msg = mc_compose_msi_msg,
	.irq_set_affinity = mc_msi_set_affinity,
};

static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct mc_pcie *port = domain->host_data;
	struct mc_msi *msi = &port->msi;
	unsigned long bit;

	mutex_lock(&msi->lock);
	bit = find_first_zero_bit(msi->used, msi->num_vectors);
	if (bit >= msi->num_vectors) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->used);

	irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq, NULL, NULL);

	mutex_unlock(&msi->lock);

	return 0;
}

static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mc_pcie *port = irq_data_get_irq_chip_data(d);
	struct mc_msi *msi = &port->msi;

	mutex_lock(&msi->lock);

	if (test_bit(d->hwirq, msi->used))
		__clear_bit(d->hwirq, msi->used);
	else
		dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);

	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc = mc_irq_msi_domain_alloc,
	.free = mc_irq_msi_domain_free,
};

static struct irq_chip mc_msi_irq_chip = {
	.name = "Microchip PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info mc_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &mc_msi_irq_chip,
};
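
/*
 * Two-level MSI setup: dev_domain hands out hardware vector numbers from
 * the 'used' bitmap, and the PCI MSI domain that endpoint drivers allocate
 * from is stacked on top of it by pci_msi_create_irq_domain().
 */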
static int mc_allocate_msi_domains(struct mc_pcie *port)
{
	struct device *dev = port->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct mc_msi *msi = &port->msi;

	mutex_init(&port->msi.lock);

	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
						&msi_domain_ops, port);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}

static void mc_handle_intx(struct irq_desc *desc)
{
	struct mc_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_INTX_MASK) {
		status &= PM_MSI_INT_INTX_MASK;
		status >>= PM_MSI_INT_INTX_SHIFT;
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			ret = generic_handle_domain_irq(port->intx_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

static void mc_ack_intx_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);

	writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
}

static void mc_mask_intx_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void mc_unmask_intx_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip mc_intx_irq_chip = {
	.name = "Microchip PCIe INTx",
	.irq_ack = mc_ack_intx_irq,
	.irq_mask = mc_mask_intx_irq,
	.irq_unmask = mc_unmask_intx_irq,
};

static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mc_pcie_intx_map,
};
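
/*
 * Helpers translating raw status-register contents into the linear event
 * bitmap consumed by mc_handle_event().
 */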
static inline u32 reg_to_event(u32 reg, struct event_map field)
{
	return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
}

static u32 pcie_events(struct mc_pcie *port)
{
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
	u32 reg = readl_relaxed(ctrl_base_addr + PCIE_EVENT_INT);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
		val |= reg_to_event(reg, pcie_event_to_event[i]);

	return val;
}

static u32 sec_errors(struct mc_pcie *port)
{
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
	u32 reg = readl_relaxed(ctrl_base_addr + SEC_ERROR_INT);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
		val |= reg_to_event(reg, sec_error_to_event[i]);

	return val;
}

static u32 ded_errors(struct mc_pcie *port)
{
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
	u32 reg = readl_relaxed(ctrl_base_addr + DED_ERROR_INT);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
		val |= reg_to_event(reg, ded_error_to_event[i]);

	return val;
}

static u32 local_events(struct mc_pcie *port)
{
	void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	u32 reg = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
		val |= reg_to_event(reg, local_status_to_event[i]);

	return val;
}

static u32 get_events(struct mc_pcie *port)
{
	u32 events = 0;

	events |= pcie_events(port);
	events |= sec_errors(port);
	events |= ded_errors(port);
	events |= local_events(port);

	return events;
}

static irqreturn_t mc_event_handler(int irq, void *dev_id)
{
	struct mc_pcie *port = dev_id;
	struct device *dev = port->dev;
	struct irq_data *data;

	data = irq_domain_get_irq_data(port->event_domain, irq);

	if (event_cause[data->hwirq].str)
		dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
	else
		dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);

	return IRQ_HANDLED;
}

static void mc_handle_event(struct irq_desc *desc)
{
	struct mc_pcie *port = irq_desc_get_handler_data(desc);
	unsigned long events;
	u32 bit;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	events = get_events(port);

	for_each_set_bit(bit, &events, NUM_EVENTS)
		generic_handle_domain_irq(port->event_domain, bit);

	chained_irq_exit(chip, desc);
}
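
/*
 * Mask-register semantics differ per event source: where mask_high is set
 * (PCIE/SEC/DED events), writing a 1 masks the event, whereas IMASK_LOCAL
 * (mask_high == 0) is an enable register, so masking clears the bit. PCIE
 * events additionally keep their enable bits in the upper half of
 * PCIE_EVENT_INT, hence the enb_mask shift and clamp below.
 */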
static void mc_ack_event_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;

	addr = port->axi_base_addr + event_descs[event].base +
		event_descs[event].offset;
	mask = event_descs[event].mask;
	mask |= event_descs[event].enb_mask;

	writel_relaxed(mask, addr);
}

static void mc_mask_event_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;
	u32 val;

	addr = port->axi_base_addr + event_descs[event].base +
		event_descs[event].mask_offset;
	mask = event_descs[event].mask;
	if (event_descs[event].enb_mask) {
		mask <<= PCIE_EVENT_INT_ENB_SHIFT;
		mask &= PCIE_EVENT_INT_ENB_MASK;
	}

	if (!event_descs[event].mask_high)
		mask = ~mask;

	raw_spin_lock(&port->lock);
	val = readl_relaxed(addr);
	if (event_descs[event].mask_high)
		val |= mask;
	else
		val &= mask;

	writel_relaxed(val, addr);
	raw_spin_unlock(&port->lock);
}

static void mc_unmask_event_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;
	u32 val;

	addr = port->axi_base_addr + event_descs[event].base +
		event_descs[event].mask_offset;
	mask = event_descs[event].mask;

	if (event_descs[event].enb_mask)
		mask <<= PCIE_EVENT_INT_ENB_SHIFT;

	if (event_descs[event].mask_high)
		mask = ~mask;

	if (event_descs[event].enb_mask)
		mask &= PCIE_EVENT_INT_ENB_MASK;

	raw_spin_lock(&port->lock);
	val = readl_relaxed(addr);
	if (event_descs[event].mask_high)
		val &= mask;
	else
		val |= mask;

	writel_relaxed(val, addr);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip mc_event_irq_chip = {
	.name = "Microchip PCIe EVENT",
	.irq_ack = mc_ack_event_irq,
	.irq_mask = mc_mask_event_irq,
	.irq_unmask = mc_unmask_event_irq,
};

static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = mc_pcie_event_map,
};

static inline void mc_pcie_deinit_clk(void *data)
{
	struct clk *clk = data;

	clk_disable_unprepare(clk);
}

static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get_optional(dev, id);
	if (IS_ERR(clk))
		return clk;
	if (!clk)
		return clk;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ERR_PTR(ret);

	devm_add_action_or_reset(dev, mc_pcie_deinit_clk, clk);

	return clk;
}

static int mc_pcie_init_clks(struct device *dev)
{
	int i;
	struct clk *fic;

	/*
	 * PCIe may be clocked via Fabric Interface using between 1 and 4
	 * clocks. Scan DT for clocks and enable them if present.
	 */
	for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
		fic = mc_pcie_init_clk(dev, poss_clks[i]);
		if (IS_ERR(fic))
			return PTR_ERR(fic);
	}

	return 0;
}

static int mc_pcie_init_irq_domains(struct mc_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "failed to find PCIe Intc node\n");
		return -EINVAL;
	}

	port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
						   &event_domain_ops, port);
	if (!port->event_domain) {
		dev_err(dev, "failed to get event domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "failed to get an INTx IRQ domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return mc_allocate_msi_domains(port);
}
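
/*
 * Program one outbound address translation (ATR) table entry. Each entry
 * occupies ATR_ENTRY_SIZE bytes; the window size is encoded in the source
 * address parameter as ilog2(size) - 1, and entry 0 is reserved for routing
 * accesses to PCIe config space (PCIE_CONFIG_INTERFACE) while the remaining
 * entries target the TX/RX interface.
 */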
static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
				 phys_addr_t axi_addr, phys_addr_t pci_addr,
				 size_t size)
{
	u32 atr_sz = ilog2(size) - 1;
	u32 val;

	if (index == 0)
		val = PCIE_CONFIG_INTERFACE;
	else
		val = PCIE_TX_RX_INTERFACE;

	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_PARAM);

	val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
	      ATR_IMPL_ENABLE;
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRCADDR_PARAM);

	val = upper_32_bits(axi_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRC_ADDR);

	val = lower_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_LSB);

	val = upper_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_UDW);

	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}

static int mc_pcie_setup_windows(struct platform_device *pdev,
				 struct mc_pcie *port)
{
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
	struct resource_entry *entry;
	u64 pci_addr;
	u32 index = 1;

	resource_list_for_each_entry(entry, &bridge->windows) {
		if (resource_type(entry->res) == IORESOURCE_MEM) {
			pci_addr = entry->res->start - entry->offset;
			mc_pcie_setup_window(bridge_base_addr, index,
					     entry->res->start, pci_addr,
					     resource_size(entry->res));
			index++;
		}
	}

	return 0;
}

static inline void mc_clear_secs(struct mc_pcie *port)
{
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;

	writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
		       SEC_ERROR_INT);
	writel_relaxed(0, ctrl_base_addr + SEC_ERROR_EVENT_CNT);
}

static inline void mc_clear_deds(struct mc_pcie *port)
{
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;

	writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
		       DED_ERROR_INT);
	writel_relaxed(0, ctrl_base_addr + DED_ERROR_EVENT_CNT);
}

static void mc_disable_interrupts(struct mc_pcie *port)
{
	void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
	u32 val;

	/* Ensure ECC bypass is enabled */
	val = ECC_CONTROL_TX_RAM_ECC_BYPASS |
	      ECC_CONTROL_RX_RAM_ECC_BYPASS |
	      ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS |
	      ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS;
	writel_relaxed(val, ctrl_base_addr + ECC_CONTROL);

	/* Disable SEC errors and clear any outstanding */
	writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
		       SEC_ERROR_INT_MASK);
	mc_clear_secs(port);

	/* Disable DED errors and clear any outstanding */
	writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
		       DED_ERROR_INT_MASK);
	mc_clear_deds(port);

	/* Disable local interrupts and clear any outstanding */
	writel_relaxed(0, bridge_base_addr + IMASK_LOCAL);
	writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_LOCAL);
	writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_MSI);

	/* Disable PCIe events and clear any outstanding */
	val = PCIE_EVENT_INT_L2_EXIT_INT |
	      PCIE_EVENT_INT_HOTRST_EXIT_INT |
	      PCIE_EVENT_INT_DLUP_EXIT_INT |
	      PCIE_EVENT_INT_L2_EXIT_INT_MASK |
	      PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK |
	      PCIE_EVENT_INT_DLUP_EXIT_INT_MASK;
	writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT);

	/* Disable host interrupts and clear any outstanding */
	writel_relaxed(0, bridge_base_addr + IMASK_HOST);
	writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
}
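
/*
 * Everything funnels through one platform IRQ: the event domain demuxes it
 * in mc_handle_event(), and the INTx and MSI chained handlers hang off
 * their respective event-domain interrupts.
 */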
static int mc_init_interrupts(struct platform_device *pdev, struct mc_pcie *port)
{
	struct device *dev = &pdev->dev;
	int irq;
	int i, intx_irq, msi_irq, event_irq;
	int ret;

	ret = mc_pcie_init_irq_domains(port);
	if (ret) {
		dev_err(dev, "failed creating IRQ domains\n");
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	for (i = 0; i < NUM_EVENTS; i++) {
		event_irq = irq_create_mapping(port->event_domain, i);
		if (!event_irq) {
			dev_err(dev, "failed to map hwirq %d\n", i);
			return -ENXIO;
		}

		ret = devm_request_irq(dev, event_irq, mc_event_handler,
				       0, event_cause[i].sym, port);
		if (ret) {
			dev_err(dev, "failed to request IRQ %d\n", event_irq);
			return ret;
		}
	}

	intx_irq = irq_create_mapping(port->event_domain,
				      EVENT_LOCAL_PM_MSI_INT_INTX);
	if (!intx_irq) {
		dev_err(dev, "failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);

	msi_irq = irq_create_mapping(port->event_domain,
				     EVENT_LOCAL_PM_MSI_INT_MSI);
	if (!msi_irq)
		return -ENXIO;

	/* Plug the MSI chained handler */
	irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(irq, mc_handle_event, port);

	return 0;
}

static int mc_platform_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	int ret;

	/* Configure address translation table 0 for PCIe config space */
	mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start,
			     cfg->res.start,
			     resource_size(&cfg->res));

	/* Need some fixups in config space */
	mc_pcie_enable_msi(port, cfg->win);

	/* Configure non-config space outbound ranges */
	ret = mc_pcie_setup_windows(pdev, port);
	if (ret)
		return ret;

	/* Address translation is up; safe to enable interrupts */
	ret = mc_init_interrupts(pdev, port);
	if (ret)
		return ret;

	return 0;
}
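
/*
 * Ordering matters in probe: interrupts are quiesced and the MSI geometry
 * (vector count and doorbell address) is read from the bridge before
 * pci_host_common_probe() sets up ECAM and calls back into
 * mc_platform_init().
 */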
static int mc_host_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void __iomem *bridge_base_addr;
	int ret;
	u32 val;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = dev;

	port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(port->axi_base_addr))
		return PTR_ERR(port->axi_base_addr);

	mc_disable_interrupts(port);

	bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;

	/* Allow enabling MSI by disabling MSI-X */
	val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
	val &= ~MSIX_CAP_MASK;
	writel(val, bridge_base_addr + PCIE_PCI_IRQ_DW0);

	/* Pick num vectors from bitfile programmed onto FPGA fabric */
	val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
	val &= NUM_MSI_MSGS_MASK;
	val >>= NUM_MSI_MSGS_SHIFT;

	port->msi.num_vectors = 1 << val;

	/* Pick vector address from design */
	port->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR);

	ret = mc_pcie_init_clks(dev);
	if (ret) {
		dev_err(dev, "failed to get clock resources, error %d\n", ret);
		return -ENODEV;
	}

	return pci_host_common_probe(pdev);
}

static const struct pci_ecam_ops mc_ecam_ops = {
	.init = mc_platform_init,
	.pci_ops = {
		.map_bus = pci_ecam_map_bus,
		.read = pci_generic_config_read,
		.write = pci_generic_config_write,
	}
};

static const struct of_device_id mc_pcie_of_match[] = {
	{
		.compatible = "microchip,pcie-host-1.0",
		.data = &mc_ecam_ops,
	},
	{},
};
MODULE_DEVICE_TABLE(of, mc_pcie_of_match);

static struct platform_driver mc_pcie_driver = {
	.probe = mc_host_probe,
	.driver = {
		.name = "microchip-pcie",
		.of_match_table = mc_pcie_of_match,
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver(mc_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microchip PCIe host controller driver");