]>
Commit | Line | Data |
---|---|---|
4848704a SM |
1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | |
2cb32607 | 3 | * Copyright (C) 2020-2021 Broadcom |
4848704a SM |
4 | * |
5 | */ | |
6 | ||
4848704a SM |
7 | #include <dm.h> |
8 | #include <errno.h> | |
9 | #include <generic-phy.h> | |
10 | #include <pci.h> | |
11 | #include <malloc.h> | |
12 | #include <asm/io.h> | |
13 | #include <dm/device_compat.h> | |
2cb32607 | 14 | #include <linux/delay.h> |
4848704a SM |
15 | #include <linux/log2.h> |
16 | ||
17 | #define EP_PERST_SOURCE_SELECT_SHIFT 2 | |
18 | #define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT) | |
19 | #define EP_MODE_SURVIVE_PERST_SHIFT 1 | |
20 | #define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT) | |
21 | #define RC_PCIE_RST_OUTPUT_SHIFT 0 | |
22 | #define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT) | |
23 | ||
24 | #define CFG_IND_ADDR_MASK 0x00001ffc | |
25 | ||
3264b617 | 26 | #define CFG_ADDR_CFG_ECAM_MASK 0xfffffffc |
4848704a SM |
27 | #define CFG_ADDR_CFG_TYPE_MASK 0x00000003 |
28 | ||
29 | #define IPROC_PCI_PM_CAP 0x48 | |
30 | #define IPROC_PCI_PM_CAP_MASK 0xffff | |
31 | #define IPROC_PCI_EXP_CAP 0xac | |
32 | ||
33 | #define IPROC_PCIE_REG_INVALID 0xffff | |
34 | ||
35 | #define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ | |
36 | #define PCI_EXP_RTCTL 28 /* Root Control */ | |
37 | /* CRS Software Visibility capability */ | |
38 | #define PCI_EXP_RTCAP_CRSVIS 0x0001 | |
39 | ||
40 | #define PCI_EXP_LNKSTA 18 /* Link Status */ | |
41 | #define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ | |
42 | ||
43 | #define PCIE_PHYLINKUP_SHIFT 3 | |
44 | #define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT) | |
45 | #define PCIE_DL_ACTIVE_SHIFT 2 | |
46 | #define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT) | |
47 | ||
48 | /* derive the enum index of the outbound/inbound mapping registers */ | |
49 | #define MAP_REG(base_reg, index) ((base_reg) + (index) * 2) | |
50 | ||
51 | /* | |
52 | * Maximum number of outbound mapping window sizes that can be supported by any | |
53 | * OARR/OMAP mapping pair | |
54 | */ | |
55 | #define MAX_NUM_OB_WINDOW_SIZES 4 | |
56 | ||
57 | #define OARR_VALID_SHIFT 0 | |
58 | #define OARR_VALID BIT(OARR_VALID_SHIFT) | |
59 | #define OARR_SIZE_CFG_SHIFT 1 | |
60 | ||
61 | /* | |
62 | * Maximum number of inbound mapping region sizes that can be supported by an | |
63 | * IARR | |
64 | */ | |
65 | #define MAX_NUM_IB_REGION_SIZES 9 | |
66 | ||
67 | #define IMAP_VALID_SHIFT 0 | |
68 | #define IMAP_VALID BIT(IMAP_VALID_SHIFT) | |
69 | ||
70 | #define APB_ERR_EN_SHIFT 0 | |
71 | #define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) | |
72 | ||
/**
 * enum iproc_pcie_reg - logical indices of iProc PCIe host registers
 *
 * Per-controller tables below map each index to a hardware offset; any
 * index left 0 in a table is translated to IPROC_PCIE_REG_INVALID by
 * iproc_pcie_rev_init() to mark the register as absent.
 */
enum iproc_pcie_reg {
	/* clock/reset signal control */
	IPROC_PCIE_CLK_CTRL = 0,

	/*
	 * To allow MSI to be steered to an external MSI controller (e.g., ARM
	 * GICv3 ITS)
	 */
	IPROC_PCIE_MSI_GIC_MODE,

	/*
	 * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
	 * window where the MSI posted writes are written, for the writes to be
	 * interpreted as MSI writes.
	 */
	IPROC_PCIE_MSI_BASE_ADDR,
	IPROC_PCIE_MSI_WINDOW_SIZE,

	/*
	 * To hold the address of the register where the MSI writes are
	 * programmed. When ARM GICv3 ITS is used, this should be programmed
	 * with the address of the GITS_TRANSLATER register.
	 */
	IPROC_PCIE_MSI_ADDR_LO,
	IPROC_PCIE_MSI_ADDR_HI,

	/* enable MSI */
	IPROC_PCIE_MSI_EN_CFG,

	/* allow access to root complex configuration space */
	IPROC_PCIE_CFG_IND_ADDR,
	IPROC_PCIE_CFG_IND_DATA,

	/* allow access to device configuration space */
	IPROC_PCIE_CFG_ADDR,
	IPROC_PCIE_CFG_DATA,

	/* enable INTx */
	IPROC_PCIE_INTX_EN,
	IPROC_PCIE_INTX_CSR,

	/* outbound address mapping (OARR/OMAP pairs must stay adjacent,
	 * see MAP_REG())
	 */
	IPROC_PCIE_OARR0,
	IPROC_PCIE_OMAP0,
	IPROC_PCIE_OARR1,
	IPROC_PCIE_OMAP1,
	IPROC_PCIE_OARR2,
	IPROC_PCIE_OMAP2,
	IPROC_PCIE_OARR3,
	IPROC_PCIE_OMAP3,

	/* inbound address mapping (IARR/IMAP pairs, see MAP_REG()) */
	IPROC_PCIE_IARR0,
	IPROC_PCIE_IMAP0,
	IPROC_PCIE_IARR1,
	IPROC_PCIE_IMAP1,
	IPROC_PCIE_IARR2,
	IPROC_PCIE_IMAP2,
	IPROC_PCIE_IARR3,
	IPROC_PCIE_IMAP3,
	IPROC_PCIE_IARR4,
	IPROC_PCIE_IMAP4,

	/* config read status */
	IPROC_PCIE_CFG_RD_STATUS,

	/* link status */
	IPROC_PCIE_LINK_STATUS,

	/* enable APB error for unsupported requests */
	IPROC_PCIE_APB_ERR_EN,

	/* Ordering Mode configuration registers */
	IPROC_PCIE_ORDERING_CFG,
	IPROC_PCIE_IMAP0_RO_CONTROL,
	IPROC_PCIE_IMAP1_RO_CONTROL,
	IPROC_PCIE_IMAP2_RO_CONTROL,
	IPROC_PCIE_IMAP3_RO_CONTROL,
	IPROC_PCIE_IMAP4_RO_CONTROL,

	/* total number of core registers */
	IPROC_PCIE_MAX_NUM_REG,
};
159 | ||
/*
 * iProc PCIe PAXB v2 register offsets, indexed by enum iproc_pcie_reg.
 * Unlisted entries default to 0 and are invalidated at init time.
 */
static const u16 iproc_pcie_reg_paxb_v2[] = {
	[IPROC_PCIE_CLK_CTRL] = 0x000,
	[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
	[IPROC_PCIE_CFG_IND_DATA] = 0x124,
	[IPROC_PCIE_CFG_ADDR] = 0x1f8,
	[IPROC_PCIE_CFG_DATA] = 0x1fc,
	[IPROC_PCIE_INTX_EN] = 0x330,
	[IPROC_PCIE_INTX_CSR] = 0x334,
	[IPROC_PCIE_OARR0] = 0xd20,
	[IPROC_PCIE_OMAP0] = 0xd40,
	[IPROC_PCIE_OARR1] = 0xd28,
	[IPROC_PCIE_OMAP1] = 0xd48,
	[IPROC_PCIE_OARR2] = 0xd60,
	[IPROC_PCIE_OMAP2] = 0xd68,
	[IPROC_PCIE_OARR3] = 0xdf0,
	[IPROC_PCIE_OMAP3] = 0xdf8,
	[IPROC_PCIE_IARR0] = 0xd00,
	[IPROC_PCIE_IMAP0] = 0xc00,
	[IPROC_PCIE_IARR2] = 0xd10,
	[IPROC_PCIE_IMAP2] = 0xcc0,
	[IPROC_PCIE_IARR3] = 0xe00,
	[IPROC_PCIE_IMAP3] = 0xe08,
	[IPROC_PCIE_IARR4] = 0xe68,
	[IPROC_PCIE_IMAP4] = 0xe70,
	[IPROC_PCIE_CFG_RD_STATUS] = 0xee0,
	[IPROC_PCIE_LINK_STATUS] = 0xf0c,
	[IPROC_PCIE_APB_ERR_EN] = 0xf40,
	[IPROC_PCIE_ORDERING_CFG] = 0x2000,
	[IPROC_PCIE_IMAP0_RO_CONTROL] = 0x201c,
	[IPROC_PCIE_IMAP1_RO_CONTROL] = 0x2020,
	[IPROC_PCIE_IMAP2_RO_CONTROL] = 0x2024,
	[IPROC_PCIE_IMAP3_RO_CONTROL] = 0x2028,
	[IPROC_PCIE_IMAP4_RO_CONTROL] = 0x202c,
};
195 | ||
/*
 * iProc PCIe PAXC v2 register offsets, indexed by enum iproc_pcie_reg.
 * Unlisted entries default to 0 and are invalidated at init time.
 */
static const u16 iproc_pcie_reg_paxc_v2[] = {
	[IPROC_PCIE_MSI_GIC_MODE] = 0x050,
	[IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
	[IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
	[IPROC_PCIE_MSI_ADDR_LO] = 0x07c,
	[IPROC_PCIE_MSI_ADDR_HI] = 0x080,
	[IPROC_PCIE_MSI_EN_CFG] = 0x09c,
	[IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
	[IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
	[IPROC_PCIE_CFG_ADDR] = 0x1f8,
	[IPROC_PCIE_CFG_DATA] = 0x1fc,
};
209 | ||
/*
 * Device IDs of controllers that have a corrupted capability list
 * requiring the SW fixup applied in iproc_pcie_fix_cap().
 */
static const u16 iproc_pcie_corrupt_cap_did[] = {
	0x16cd,
	0x16f0,
	0xd802,
	0xd804
};
220 | ||
/* supported iProc PCIe controller interface variants */
enum iproc_pcie_type {
	IPROC_PCIE_PAXB_V2,
	IPROC_PCIE_PAXC,
	IPROC_PCIE_PAXC_V2,
};
226 | ||
/**
 * struct iproc_pcie_ob - iProc PCIe outbound mapping
 *
 * @axi_offset: offset from the AXI address to the internal address used by
 * the iProc PCIe core
 * @nr_windows: total number of supported outbound mapping windows
 */
struct iproc_pcie_ob {
	resource_size_t axi_offset;
	unsigned int nr_windows;
};
238 | ||
/**
 * struct iproc_pcie_ib - iProc PCIe inbound mapping
 *
 * @nr_regions: total number of supported inbound mapping regions
 */
struct iproc_pcie_ib {
	unsigned int nr_regions;
};
247 | ||
/**
 * struct iproc_pcie_ob_map - outbound mapping controller specific parameters
 *
 * @window_sizes: list of supported outbound mapping window sizes in MB
 * @nr_sizes: number of supported outbound mapping window sizes
 */
struct iproc_pcie_ob_map {
	resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
	unsigned int nr_sizes;
};
258 | ||
/* per-window outbound mapping capabilities of the PAXB v2 controller */
static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
	{
		/* OARR0/OMAP0 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
	{
		/* OARR1/OMAP1 */
		.window_sizes = { 128, 256 },
		.nr_sizes = 2,
	},
	{
		/* OARR2/OMAP2 */
		.window_sizes = { 128, 256, 512, 1024 },
		.nr_sizes = 4,
	},
	{
		/* OARR3/OMAP3 */
		.window_sizes = { 128, 256, 512, 1024 },
		.nr_sizes = 4,
	},
};
281 | ||
/**
 * enum iproc_pcie_ib_map_type - iProc PCIe inbound mapping type
 */
enum iproc_pcie_ib_map_type {
	/* for DDR memory */
	IPROC_PCIE_IB_MAP_MEM = 0,

	/* for device I/O memory */
	IPROC_PCIE_IB_MAP_IO,

	/* invalid or unused */
	IPROC_PCIE_IB_MAP_INVALID
};
295 | ||
/**
 * struct iproc_pcie_ib_map - inbound mapping controller specific parameters
 *
 * @type: inbound mapping region type
 * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or SZ_1G
 * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
 * GB, depending on the size unit
 * @nr_sizes: number of supported inbound mapping region sizes
 * @nr_windows: number of supported inbound mapping windows for the region
 * @imap_addr_offset: register offset between the upper and lower 32-bit
 * IMAP address registers
 * @imap_window_offset: register offset between each IMAP window
 */
struct iproc_pcie_ib_map {
	enum iproc_pcie_ib_map_type type;
	unsigned int size_unit;
	resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
	unsigned int nr_sizes;
	unsigned int nr_windows;
	u16 imap_addr_offset;
	u16 imap_window_offset;
};
318 | ||
/* per-region inbound mapping capabilities of the PAXB v2 controller */
static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
	{
		/* IARR0/IMAP0 */
		.type = IPROC_PCIE_IB_MAP_IO,
		.size_unit = SZ_1K,
		.region_sizes = { 32 },
		.nr_sizes = 1,
		.nr_windows = 8,
		.imap_addr_offset = 0x40,
		.imap_window_offset = 0x4,
	},
	{
		/* IARR1/IMAP1 (currently unused) */
		.type = IPROC_PCIE_IB_MAP_INVALID,
	},
	{
		/* IARR2/IMAP2 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1M,
		.region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
				  16384 },
		.nr_sizes = 9,
		.nr_windows = 1,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
	{
		/* IARR3/IMAP3 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1G,
		.region_sizes = { 1, 2, 4, 8, 16, 32 },
		.nr_sizes = 6,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
	{
		/* IARR4/IMAP4 */
		.type = IPROC_PCIE_IB_MAP_MEM,
		.size_unit = SZ_1G,
		.region_sizes = { 32, 64, 128, 256, 512 },
		.nr_sizes = 5,
		.nr_windows = 8,
		.imap_addr_offset = 0x4,
		.imap_window_offset = 0x8,
	},
};
366 | ||
/**
 * struct iproc_pcie - iproc pcie device instance
 *
 * @dev: pointer to pcie udevice
 * @base: device I/O base address
 * @type: pci device type, PAXC or PAXB
 * @reg_offsets: per-index register offset table, allocated and populated
 * by iproc_pcie_rev_init(); absent registers hold IPROC_PCIE_REG_INVALID
 * @fix_paxc_cap: true once a device ID with a corrupted capability list
 * has been detected, enabling the read fixups in iproc_pcie_fix_cap()
 * @need_ob_cfg: outbound mapping status
 * @ob: pcie outbound mapping
 * @ob_map: pointer to outbound mapping parameters
 * @need_ib_cfg: inbound mapping status
 * @ib: pcie inbound mapping
 * @ib_map: pointer to inbound mapping parameters
 * @ep_is_internal: ep status
 * @phy: phy device
 * @link_is_active: link up status
 * @has_apb_err_disable: true when APB error forwarding can be toggled
 * via IPROC_PCIE_APB_ERR_EN
 */
struct iproc_pcie {
	struct udevice *dev;
	void __iomem *base;
	enum iproc_pcie_type type;
	u16 *reg_offsets;
	bool fix_paxc_cap;
	bool need_ob_cfg;
	struct iproc_pcie_ob ob;
	const struct iproc_pcie_ob_map *ob_map;
	bool need_ib_cfg;
	struct iproc_pcie_ib ib;
	const struct iproc_pcie_ib_map *ib_map;
	bool ep_is_internal;
	struct phy phy;
	bool link_is_active;
	bool has_apb_err_disable;
};
403 | ||
404 | static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset) | |
405 | { | |
406 | return !!(reg_offset == IPROC_PCIE_REG_INVALID); | |
407 | } | |
408 | ||
/* Look up the hardware offset for a logical register index */
static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie,
					enum iproc_pcie_reg reg)
{
	return pcie->reg_offsets[reg];
}
414 | ||
415 | static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie, | |
416 | enum iproc_pcie_reg reg) | |
417 | { | |
418 | u16 offset = iproc_pcie_reg_offset(pcie, reg); | |
419 | ||
420 | if (iproc_pcie_reg_is_invalid(offset)) | |
421 | return 0; | |
422 | ||
423 | return readl(pcie->base + offset); | |
424 | } | |
425 | ||
426 | static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie, | |
427 | enum iproc_pcie_reg reg, u32 val) | |
428 | { | |
429 | u16 offset = iproc_pcie_reg_offset(pcie, reg); | |
430 | ||
431 | if (iproc_pcie_reg_is_invalid(offset)) | |
432 | return; | |
433 | ||
434 | writel(val, pcie->base + offset); | |
435 | } | |
436 | ||
/**
 * iproc_pcie_map_ep_cfg_reg() - map a config register to a CPU address
 *
 * For bus 0 the root complex's own config space is reached through the
 * indirect CFG_IND_ADDR/CFG_IND_DATA register pair; for downstream buses
 * an ECAM-style address is written to CFG_ADDR and the register is then
 * accessed through CFG_DATA (only attempted when the link is up).
 *
 * @udev: pcie udevice
 * @bdf: encoded bus/device/function
 * @where: register offset within config space
 * @paddress: output CPU address through which the register is accessed
 * @return: 0 on success and -ve on failure
 */
static int iproc_pcie_map_ep_cfg_reg(const struct udevice *udev, pci_dev_t bdf,
				     uint where, void **paddress)
{
	struct iproc_pcie *pcie = dev_get_priv(udev);
	unsigned int busno = PCI_BUS(bdf);
	unsigned int slot = PCI_DEV(bdf);
	unsigned int fn = PCI_FUNC(bdf);

	u16 offset;
	u32 val;

	/* root complex access */
	if (busno == 0) {
		/* only one root port exists on bus 0 */
		if (slot > 0 || fn > 0)
			return -ENODEV;

		iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
				     where & CFG_IND_ADDR_MASK);
		offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
		if (iproc_pcie_reg_is_invalid(offset))
			return -ENODEV;

		*paddress = (pcie->base + offset);
		return 0;
	}

	/* downstream access requires an established link */
	if (!pcie->link_is_active)
		return -ENODEV;

	/* EP device access: ECAM address plus config type bits */
	val = (PCIE_ECAM_OFFSET(busno, slot, fn, where) & CFG_ADDR_CFG_ECAM_MASK)
		| (1 & CFG_ADDR_CFG_TYPE_MASK);

	iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
	offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);

	if (iproc_pcie_reg_is_invalid(offset))
		return -ENODEV;

	*paddress = (pcie->base + offset);

	return 0;
}
480 | ||
/**
 * iproc_pcie_fix_cap() - fix up reads of corrupted capability registers
 *
 * Controllers whose device IDs are listed in iproc_pcie_corrupt_cap_did
 * expose a corrupted capability chain; rewrite the affected config-read
 * results on the fly. The fixup is armed when the vendor/device ID word
 * is read.
 *
 * @pcie: pcie device
 * @where: config space offset that was read
 * @val: in/out read value to fix up
 */
static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, ulong *val)
{
	u32 i, dev_id;

	switch (where & ~0x3) {
	case PCI_VENDOR_ID:
		/* device ID lives in the upper half of the 32-bit word */
		dev_id = *val >> 16;

		/*
		 * Activate fixup for those controllers that have corrupted
		 * capability list registers
		 */
		for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++)
			if (dev_id == iproc_pcie_corrupt_cap_did[i])
				pcie->fix_paxc_cap = true;
		break;

	case IPROC_PCI_PM_CAP:
		if (pcie->fix_paxc_cap) {
			/* advertise PM, force next capability to PCIe */
			*val &= ~IPROC_PCI_PM_CAP_MASK;
			*val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;
		}
		break;

	case IPROC_PCI_EXP_CAP:
		if (pcie->fix_paxc_cap) {
			/* advertise root port, version 2, terminate here */
			*val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 |
				PCI_CAP_ID_EXP;
		}
		break;

	case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
		/* Don't advertise CRS SV support */
		*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
		break;

	default:
		break;
	}
}
523 | ||
524 | static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie, | |
525 | unsigned int devfn, int where, | |
526 | int size, u32 *val) | |
527 | { | |
528 | void __iomem *addr; | |
529 | int ret; | |
530 | ||
531 | ret = iproc_pcie_map_ep_cfg_reg(pcie->dev, devfn, where & ~0x3, &addr); | |
532 | if (ret) { | |
533 | *val = ~0; | |
534 | return -EINVAL; | |
535 | } | |
536 | ||
537 | *val = readl(addr); | |
538 | ||
539 | if (size <= 2) | |
540 | *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); | |
541 | ||
542 | return 0; | |
543 | } | |
544 | ||
545 | static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie, | |
546 | unsigned int devfn, int where, | |
547 | int size, u32 val) | |
548 | { | |
549 | void __iomem *addr; | |
550 | int ret; | |
551 | u32 mask, tmp; | |
552 | ||
553 | ret = iproc_pcie_map_ep_cfg_reg(pcie->dev, devfn, where & ~0x3, &addr); | |
554 | if (ret) | |
555 | return -EINVAL; | |
556 | ||
557 | if (size == 4) { | |
558 | writel(val, addr); | |
559 | return 0; | |
560 | } | |
561 | ||
562 | mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); | |
563 | tmp = readl(addr) & mask; | |
564 | tmp |= val << ((where & 0x3) * 8); | |
565 | writel(tmp, addr); | |
566 | return 0; | |
567 | } | |
568 | ||
/**
 * iproc_pcie_apb_err_disable() - configure apb error
 *
 * APB error forwarding can be disabled during access of configuration
 * registers of the endpoint device, to prevent unsupported requests
 * (typically seen during enumeration with multi-function devices) from
 * triggering a system exception.
 *
 * @bus: pcie udevice
 * @bdf: encoded bus/device/function value
 * @disable: true to disable APB error forwarding, false to re-enable it
 */
static inline void iproc_pcie_apb_err_disable(const struct udevice *bus,
					      pci_dev_t bdf, bool disable)
{
	struct iproc_pcie *pcie = dev_get_priv(bus);
	u32 val;

	/* only applies to downstream accesses on controllers that support it */
	if (PCI_BUS(bdf) && pcie->has_apb_err_disable) {
		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
		if (disable)
			val &= ~APB_ERR_EN;
		else
			val |= APB_ERR_EN;
		iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
	}
}
596 | ||
/**
 * iproc_pcie_config_read32() - dm_pci_ops read_config callback
 *
 * Performs a 32-bit aligned config read with APB error forwarding
 * temporarily disabled, extracts the requested bytes, and applies
 * capability-list fixups for root-complex (bus 0) reads.
 *
 * @bus: pcie udevice
 * @bdf: encoded bus/device/function
 * @offset: register offset within config space
 * @valuep: output value
 * @size: access size
 * @return: 0 on success and -ve on failure
 */
static int iproc_pcie_config_read32(const struct udevice *bus, pci_dev_t bdf,
				    uint offset, ulong *valuep,
				    enum pci_size_t size)
{
	struct iproc_pcie *pcie = dev_get_priv(bus);
	int ret;
	ulong data;

	iproc_pcie_apb_err_disable(bus, bdf, true);
	ret = pci_generic_mmap_read_config(bus, iproc_pcie_map_ep_cfg_reg,
					   bdf, offset, &data, PCI_SIZE_32);
	iproc_pcie_apb_err_disable(bus, bdf, false);
	/* extract the requested bytes from the aligned 32-bit word */
	if (size <= PCI_SIZE_16)
		*valuep = (data >> (8 * (offset & 3))) &
			  ((1 << (BIT(size) * 8)) - 1);
	else
		*valuep = data;

	/* capability fixups apply to root complex (bus 0) reads only */
	if (!ret && PCI_BUS(bdf) == 0)
		iproc_pcie_fix_cap(pcie, offset, valuep);

	return ret;
}
620 | ||
/**
 * iproc_pcie_config_write32() - dm_pci_ops write_config callback
 *
 * Full-word writes go straight through; sub-word writes perform a
 * read-modify-write of the containing 32-bit word with APB error
 * forwarding temporarily disabled.
 *
 * @bus: pcie udevice
 * @bdf: encoded bus/device/function
 * @offset: register offset within config space
 * @value: value to write
 * @size: access size
 * @return: 0 on success and -ve on failure
 */
static int iproc_pcie_config_write32(struct udevice *bus, pci_dev_t bdf,
				     uint offset, ulong value,
				     enum pci_size_t size)
{
	void *addr;
	ulong mask, tmp;
	int ret;

	ret = iproc_pcie_map_ep_cfg_reg(bus, bdf, offset, &addr);
	if (ret)
		return ret;

	if (size == PCI_SIZE_32) {
		writel(value, addr);
		return ret;
	}

	iproc_pcie_apb_err_disable(bus, bdf, true);
	/* merge the new bytes into the existing 32-bit word */
	mask = ~(((1 << (BIT(size) * 8)) - 1) << ((offset & 0x3) * 8));
	tmp = readl(addr) & mask;
	tmp |= (value << ((offset & 0x3) * 8));
	writel(tmp, addr);
	iproc_pcie_apb_err_disable(bus, bdf, false);

	return ret;
}
647 | ||
648 | const static struct dm_pci_ops iproc_pcie_ops = { | |
649 | .read_config = iproc_pcie_config_read32, | |
650 | .write_config = iproc_pcie_config_write32, | |
651 | }; | |
652 | ||
/**
 * iproc_pcie_rev_init() - set up controller-type specific parameters
 *
 * Selects the register offset table and outbound/inbound mapping
 * parameters for the detected controller type, then builds
 * pcie->reg_offsets, storing IPROC_PCIE_REG_INVALID in every slot the
 * hardware does not implement.
 *
 * @pcie: pcie device
 * @return: 0 on success and -ve on failure
 */
static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
{
	unsigned int reg_idx;
	const u16 *regs;
	u16 num_elements;

	switch (pcie->type) {
	case IPROC_PCIE_PAXC_V2:
		pcie->ep_is_internal = true;
		regs = iproc_pcie_reg_paxc_v2;
		num_elements = ARRAY_SIZE(iproc_pcie_reg_paxc_v2);
		break;
	case IPROC_PCIE_PAXB_V2:
		regs = iproc_pcie_reg_paxb_v2;
		num_elements = ARRAY_SIZE(iproc_pcie_reg_paxb_v2);
		pcie->has_apb_err_disable = true;
		if (pcie->need_ob_cfg) {
			pcie->ob.axi_offset = 0;
			pcie->ob_map = paxb_v2_ob_map;
			pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
		}
		pcie->need_ib_cfg = true;
		pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
		pcie->ib_map = paxb_v2_ib_map;
		break;
	default:
		dev_dbg(pcie->dev, "incompatible iProc PCIe interface\n");
		return -EINVAL;
	}

	pcie->reg_offsets = calloc(IPROC_PCIE_MAX_NUM_REG,
				   sizeof(*pcie->reg_offsets));
	if (!pcie->reg_offsets)
		return -ENOMEM;

	/* go through the register table and populate all valid registers */
	/* index 0 is special: PAXC v2 has no CLK_CTRL even though its table
	 * entry is 0 (which is a valid offset on PAXB)
	 */
	pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
		IPROC_PCIE_REG_INVALID : regs[0];
	for (reg_idx = 1; reg_idx < num_elements; reg_idx++)
		pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
			regs[reg_idx] : IPROC_PCIE_REG_INVALID;

	return 0;
}
697 | ||
698 | static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie, | |
699 | int window_idx) | |
700 | { | |
701 | u32 val; | |
702 | ||
703 | val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx)); | |
704 | ||
705 | return !!(val & OARR_VALID); | |
706 | } | |
707 | ||
/**
 * iproc_pcie_ob_write() - program one outbound OARR/OMAP window pair
 *
 * @pcie: pcie device
 * @window_idx: outbound window index
 * @size_idx: index into the window's supported size list
 * @axi_addr: translated iProc-internal AXI address
 * @pci_addr: PCI address the window maps to
 * @return: 0 on success and -ve on failure
 */
static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
				      int size_idx, u64 axi_addr, u64 pci_addr)
{
	u16 oarr_offset, omap_offset;

	/*
	 * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
	 * on window index.
	 */
	oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
							  window_idx));
	omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
							  window_idx));
	if (iproc_pcie_reg_is_invalid(oarr_offset) ||
	    iproc_pcie_reg_is_invalid(omap_offset))
		return -EINVAL;

	/*
	 * Program the OARR registers. The upper 32-bit OARR register is
	 * always right after the lower 32-bit OARR register.
	 */
	writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
	       OARR_VALID, pcie->base + oarr_offset);
	writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);

	/* now program the OMAP registers */
	writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
	writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);

	debug("ob window [%d]: offset 0x%x axi %pap pci %pap\n",
	      window_idx, oarr_offset, &axi_addr, &pci_addr);
	debug("oarr lo 0x%x oarr hi 0x%x\n",
	      readl(pcie->base + oarr_offset),
	      readl(pcie->base + oarr_offset + 4));
	debug("omap lo 0x%x omap hi 0x%x\n",
	      readl(pcie->base + omap_offset),
	      readl(pcie->base + omap_offset + 4));

	return 0;
}
748 | ||
/**
 * iproc_pcie_setup_ob() - setup outbound address mapping
 *
 * Some iProc SoCs require the SW to configure the outbound address mapping
 * Outbound address translation:
 *
 * iproc_pcie_address = axi_address - axi_offset
 * OARR = iproc_pcie_address
 * OMAP = pci_addr
 * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
 *
 * Windows are tried from the highest index down, and within each window
 * the supported sizes are tried from largest to smallest; a request
 * larger than one window is split across several windows.
 *
 * @pcie: pcie device
 * @axi_addr: axi address to be translated
 * @pci_addr: pci address
 * @size: window size
 *
 * @return: 0 on success and -ve on failure
 */
static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
			       u64 pci_addr, resource_size_t size)
{
	struct iproc_pcie_ob *ob = &pcie->ob;
	int ret = -EINVAL, window_idx, size_idx;

	if (axi_addr < ob->axi_offset) {
		pr_err("axi address %pap less than offset %pap\n",
		       &axi_addr, &ob->axi_offset);
		return -EINVAL;
	}

	/*
	 * Translate the AXI address to the internal address used by the iProc
	 * PCIe core before programming the OARR
	 */
	axi_addr -= ob->axi_offset;

	/* iterate through all OARR/OMAP mapping windows */
	for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
		const struct iproc_pcie_ob_map *ob_map =
			&pcie->ob_map[window_idx];

		/*
		 * If current outbound window is already in use, move on to the
		 * next one.
		 */
		if (iproc_pcie_ob_is_valid(pcie, window_idx))
			continue;

		/*
		 * Iterate through all supported window sizes within the
		 * OARR/OMAP pair to find a match. Go through the window sizes
		 * in a descending order.
		 */
		for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
		     size_idx--) {
			/* window sizes are expressed in MB */
			resource_size_t window_size =
				ob_map->window_sizes[size_idx] * SZ_1M;

			/*
			 * Keep iterating until we reach the last window and
			 * with the minimal window size at index zero. In this
			 * case, we take a compromise by mapping it using the
			 * minimum window size that can be supported
			 */
			if (size < window_size) {
				if (size_idx > 0 || window_idx > 0)
					continue;

				/*
				 * For the corner case of reaching the minimal
				 * window size that can be supported on the
				 * last window
				 */
				axi_addr = ALIGN_DOWN(axi_addr, window_size);
				pci_addr = ALIGN_DOWN(pci_addr, window_size);
				size = window_size;
			}

			/* hardware requires window-size alignment */
			if (!IS_ALIGNED(axi_addr, window_size) ||
			    !IS_ALIGNED(pci_addr, window_size)) {
				pr_err("axi %pap or pci %pap not aligned\n",
				       &axi_addr, &pci_addr);
				return -EINVAL;
			}

			/*
			 * Match found! Program both OARR and OMAP and mark
			 * them as a valid entry.
			 */
			ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
						  axi_addr, pci_addr);
			if (ret)
				goto err_ob;

			size -= window_size;
			if (size == 0)
				return 0;

			/*
			 * If we are here, we are done with the current window,
			 * but not yet finished all mappings. Need to move on
			 * to the next window.
			 */
			axi_addr += window_size;
			pci_addr += window_size;
			break;
		}
	}

	/* fell through all windows without completing the mapping */
err_ob:
	pr_err("unable to configure outbound mapping\n");
	pr_err("axi %pap, axi offset %pap, pci %pap, res size %pap\n",
	       &axi_addr, &ob->axi_offset, &pci_addr, &size);

	return ret;
}
865 | ||
/**
 * iproc_pcie_map_ranges() - set up outbound windows for all host regions
 *
 * Walks the controller's memory/prefetchable regions and programs an
 * outbound mapping for each via iproc_pcie_setup_ob().
 *
 * @dev: pcie udevice
 * @return: 0 on success and -ve on failure
 */
static int iproc_pcie_map_ranges(struct udevice *dev)
{
	struct iproc_pcie *pcie = dev_get_priv(dev);
	struct udevice *bus = pci_get_controller(dev);
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	int i, ret;

	for (i = 0; i < hose->region_count; i++) {
		/* only memory-type regions need outbound translation */
		if (hose->regions[i].flags == PCI_REGION_MEM ||
		    hose->regions[i].flags == PCI_REGION_PREFETCH) {
			debug("%d: bus_addr %p, axi_addr %p, size 0x%llx\n",
			      i, &hose->regions[i].bus_start,
			      &hose->regions[i].phys_start,
			      hose->regions[i].size);
			ret = iproc_pcie_setup_ob(pcie,
						  hose->regions[i].phys_start,
						  hose->regions[i].bus_start,
						  hose->regions[i].size);
			if (ret)
				return ret;
		}
	}

	return 0;
}
891 | ||
892 | static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie, | |
893 | int region_idx) | |
894 | { | |
895 | const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; | |
896 | u32 val; | |
897 | ||
898 | val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx)); | |
899 | ||
900 | return !!(val & (BIT(ib_map->nr_sizes) - 1)); | |
901 | } | |
902 | ||
903 | static inline bool | |
904 | iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map, | |
905 | enum iproc_pcie_ib_map_type type) | |
906 | { | |
907 | return !!(ib_map->type == type); | |
908 | } | |
909 | ||
/**
 * iproc_pcie_ib_write() - program one inbound IARR region and its IMAPs
 *
 * @pcie: pcie device
 * @region_idx: inbound region index
 * @size_idx: index into the region's supported size list (encoded as a
 * one-hot bit in the IARR low register)
 * @nr_windows: number of IMAP windows in the region (assumed power of two)
 * @axi_addr: AXI address the region maps to
 * @pci_addr: inbound PCI address
 * @size: total region size, split evenly across the IMAP windows
 * @return: 0 on success and -ve on failure
 */
static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
			       int size_idx, int nr_windows, u64 axi_addr,
			       u64 pci_addr, resource_size_t size)
{
	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
	u16 iarr_offset, imap_offset;
	u32 val;
	int window_idx;

	iarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_IARR0,
							  region_idx));
	imap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_IMAP0,
							  region_idx));
	if (iproc_pcie_reg_is_invalid(iarr_offset) ||
	    iproc_pcie_reg_is_invalid(imap_offset))
		return -EINVAL;

	debug("ib region [%d]: offset 0x%x axi %pap pci %pap\n",
	      region_idx, iarr_offset, &axi_addr, &pci_addr);

	/*
	 * Program the IARR registers. The upper 32-bit IARR register is
	 * always right after the lower 32-bit IARR register.
	 */
	writel(lower_32_bits(pci_addr) | BIT(size_idx),
	       pcie->base + iarr_offset);
	writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);

	debug("iarr lo 0x%x iarr hi 0x%x\n",
	      readl(pcie->base + iarr_offset),
	      readl(pcie->base + iarr_offset + 4));

	/*
	 * Now program the IMAP registers. Each IARR region may have one or
	 * more IMAP windows.
	 */
	/* divide the region size evenly over the windows */
	size >>= ilog2(nr_windows);
	for (window_idx = 0; window_idx < nr_windows; window_idx++) {
		val = readl(pcie->base + imap_offset);
		val |= lower_32_bits(axi_addr) | IMAP_VALID;
		writel(val, pcie->base + imap_offset);
		writel(upper_32_bits(axi_addr),
		       pcie->base + imap_offset + ib_map->imap_addr_offset);

		debug("imap window [%d] lo 0x%x hi 0x%x\n",
		      window_idx, readl(pcie->base + imap_offset),
		      readl(pcie->base + imap_offset +
			    ib_map->imap_addr_offset));

		imap_offset += ib_map->imap_window_offset;
		axi_addr += size;
	}

	return 0;
}
965 | ||
966 | /** | |
967 | * iproc_pcie_setup_ib() - setup inbound address mapping | |
968 | * | |
969 | * @pcie: pcie device | |
970 | * @axi_addr: axi address to be translated | |
971 | * @pci_addr: pci address | |
972 | * @size: window size | |
973 | * @type: inbound mapping type | |
974 | * | |
975 | * @return: 0 on success and -ve on failure | |
976 | */ | |
977 | static int iproc_pcie_setup_ib(struct iproc_pcie *pcie, u64 axi_addr, | |
978 | u64 pci_addr, resource_size_t size, | |
979 | enum iproc_pcie_ib_map_type type) | |
980 | { | |
981 | struct iproc_pcie_ib *ib = &pcie->ib; | |
982 | int ret; | |
983 | unsigned int region_idx, size_idx; | |
984 | ||
985 | /* iterate through all IARR mapping regions */ | |
986 | for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) { | |
987 | const struct iproc_pcie_ib_map *ib_map = | |
988 | &pcie->ib_map[region_idx]; | |
989 | ||
990 | /* | |
991 | * If current inbound region is already in use or not a | |
992 | * compatible type, move on to the next. | |
993 | */ | |
994 | if (iproc_pcie_ib_is_in_use(pcie, region_idx) || | |
995 | !iproc_pcie_ib_check_type(ib_map, type)) | |
996 | continue; | |
997 | ||
998 | /* iterate through all supported region sizes to find a match */ | |
999 | for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) { | |
1000 | resource_size_t region_size = | |
1001 | ib_map->region_sizes[size_idx] * ib_map->size_unit; | |
1002 | ||
1003 | if (size != region_size) | |
1004 | continue; | |
1005 | ||
1006 | if (!IS_ALIGNED(axi_addr, region_size) || | |
1007 | !IS_ALIGNED(pci_addr, region_size)) { | |
1008 | pr_err("axi %pap or pci %pap not aligned\n", | |
1009 | &axi_addr, &pci_addr); | |
1010 | return -EINVAL; | |
1011 | } | |
1012 | ||
1013 | /* Match found! Program IARR and all IMAP windows. */ | |
1014 | ret = iproc_pcie_ib_write(pcie, region_idx, size_idx, | |
1015 | ib_map->nr_windows, axi_addr, | |
1016 | pci_addr, size); | |
1017 | if (ret) | |
1018 | goto err_ib; | |
1019 | else | |
1020 | return 0; | |
1021 | } | |
1022 | } | |
1023 | ret = -EINVAL; | |
1024 | ||
1025 | err_ib: | |
1026 | pr_err("unable to configure inbound mapping\n"); | |
1027 | pr_err("axi %pap, pci %pap, res size %pap\n", | |
1028 | &axi_addr, &pci_addr, &size); | |
1029 | ||
1030 | return ret; | |
1031 | } | |
1032 | ||
1033 | static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) | |
1034 | { | |
1035 | int ret; | |
1036 | struct pci_region regions; | |
1037 | int i = 0; | |
1038 | ||
1039 | while (!pci_get_dma_regions(pcie->dev, ®ions, i)) { | |
1040 | dev_dbg(pcie->dev, | |
2cb32607 | 1041 | "dma %d: bus_addr %#llx, axi_addr %#llx, size %#llx\n", |
4848704a SM |
1042 | i, regions.bus_start, regions.phys_start, regions.size); |
1043 | ||
1044 | /* Each range entry corresponds to an inbound mapping region */ | |
1045 | ret = iproc_pcie_setup_ib(pcie, regions.phys_start, | |
1046 | regions.bus_start, | |
1047 | regions.size, | |
1048 | IPROC_PCIE_IB_MAP_MEM); | |
1049 | if (ret) | |
1050 | return ret; | |
1051 | i++; | |
1052 | } | |
1053 | return 0; | |
1054 | } | |
1055 | ||
1056 | static void iproc_pcie_reset_map_regs(struct iproc_pcie *pcie) | |
1057 | { | |
1058 | struct iproc_pcie_ib *ib = &pcie->ib; | |
1059 | struct iproc_pcie_ob *ob = &pcie->ob; | |
1060 | int window_idx, region_idx; | |
1061 | ||
1062 | if (pcie->ep_is_internal) | |
1063 | return; | |
1064 | ||
1065 | /* iterate through all OARR mapping regions */ | |
1066 | for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) { | |
1067 | iproc_pcie_write_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, | |
1068 | window_idx), 0); | |
1069 | } | |
1070 | ||
1071 | /* iterate through all IARR mapping regions */ | |
1072 | for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) { | |
1073 | iproc_pcie_write_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, | |
1074 | region_idx), 0); | |
1075 | } | |
1076 | } | |
1077 | ||
/*
 * iproc_pcie_reset() - assert and release PERST# to the downstream device
 *
 * @pcie: pcie device
 *
 * Asserts reset via the CLK_CTRL register, holds it briefly, then
 * releases it and waits for the endpoint to become ready.
 */
static void iproc_pcie_reset(struct iproc_pcie *pcie)
{
	u32 val;

	/*
	 * PAXC and the internal emulated endpoint device downstream should not
	 * be reset. If firmware has been loaded on the endpoint device at an
	 * earlier boot stage, reset here causes issues.
	 */
	if (pcie->ep_is_internal)
		return;

	/*
	 * Select perst_b signal as reset source. Put the device into reset,
	 * and then bring it out of reset
	 */
	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
	/* clear source-select, survive-perst and rst-output: asserts reset */
	val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
		~RC_PCIE_RST_OUTPUT;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
	/* hold reset asserted for 250us before releasing */
	udelay(250);

	/* deassert reset by driving the RC reset output high */
	val |= RC_PCIE_RST_OUTPUT;
	iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
	/* 100ms settle time after deassert — presumably to satisfy the PCIe
	 * spec's post-reset wait before config accesses; TODO confirm
	 */
	mdelay(100);
}
1104 | ||
1105 | static inline bool iproc_pcie_link_is_active(struct iproc_pcie *pcie) | |
1106 | { | |
1107 | u32 val; | |
1108 | ||
1109 | val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS); | |
1110 | return !!((val & PCIE_PHYLINKUP) && (val & PCIE_DL_ACTIVE)); | |
1111 | } | |
1112 | ||
/*
 * iproc_pcie_check_link() - detect and report PCIe link state
 *
 * @pcie: pcie device
 *
 * Forces the root port's config-space class code to PCI-PCI bridge,
 * then probes PHY/data-link state and the negotiated link width,
 * setting pcie->link_is_active accordingly.
 *
 * @return: 0 when the controller is usable, -ENODEV when the PHY or
 *          data link is down (PAXB only)
 */
static int iproc_pcie_check_link(struct iproc_pcie *pcie)
{
	u32 link_status, class;

	pcie->link_is_active = false;
	/* force class to PCI bridge Normal decode (0x060400) */
#define PCI_BRIDGE_CTRL_REG_OFFSET	0x43c
#define PCI_BRIDGE_CTRL_REG_CLASS_MASK	0xffffff
	iproc_pci_raw_config_read32(pcie, 0,
				    PCI_BRIDGE_CTRL_REG_OFFSET,
				    4, &class);
	/* replace only the 24-bit class field, keep the remaining byte */
	class &= ~PCI_BRIDGE_CTRL_REG_CLASS_MASK;
	class |= PCI_CLASS_BRIDGE_PCI_NORMAL;
	iproc_pci_raw_config_write32(pcie, 0,
				     PCI_BRIDGE_CTRL_REG_OFFSET,
				     4, class);

	/*
	 * PAXC connects to emulated endpoint devices directly and does not
	 * have a Serdes. Therefore skip the link detection logic here.
	 */
	if (pcie->ep_is_internal) {
		pcie->link_is_active = true;
		return 0;
	}

	if (!iproc_pcie_link_is_active(pcie)) {
		pr_err("PHY or data link is INACTIVE!\n");
		return -ENODEV;
	}

#define PCI_TARGET_LINK_SPEED_MASK	0xf
#define PCI_TARGET_LINK_WIDTH_MASK	0x3f
#define PCI_TARGET_LINK_WIDTH_OFFSET	0x4

	/* check link status to see if link is active */
	iproc_pci_raw_config_read32(pcie, 0,
				    IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
				    2, &link_status);
	/* a non-zero negotiated link width means lanes are trained */
	if (link_status & PCI_EXP_LNKSTA_NLW)
		pcie->link_is_active = true;

	if (pcie->link_is_active)
		pr_info("link UP @ Speed Gen-%d and width-x%d\n",
			link_status & PCI_TARGET_LINK_SPEED_MASK,
			(link_status >> PCI_TARGET_LINK_WIDTH_OFFSET) &
			PCI_TARGET_LINK_WIDTH_MASK);
	else
		pr_info("link DOWN\n");

	return 0;
}
1165 | ||
1166 | static int iproc_pcie_probe(struct udevice *dev) | |
1167 | { | |
1168 | struct iproc_pcie *pcie = dev_get_priv(dev); | |
1169 | int ret; | |
1170 | ||
1171 | pcie->type = (enum iproc_pcie_type)dev_get_driver_data(dev); | |
1172 | debug("PAX type %d\n", pcie->type); | |
1173 | pcie->base = dev_read_addr_ptr(dev); | |
1174 | debug("PAX reg base %p\n", pcie->base); | |
1175 | ||
1176 | if (!pcie->base) | |
1177 | return -ENODEV; | |
1178 | ||
1179 | if (dev_read_bool(dev, "brcm,pcie-ob")) | |
1180 | pcie->need_ob_cfg = true; | |
1181 | ||
1182 | pcie->dev = dev; | |
1183 | ret = iproc_pcie_rev_init(pcie); | |
1184 | if (ret) | |
1185 | return ret; | |
1186 | ||
1187 | if (!pcie->ep_is_internal) { | |
1188 | ret = generic_phy_get_by_name(dev, "pcie-phy", &pcie->phy); | |
1189 | if (!ret) { | |
1190 | ret = generic_phy_init(&pcie->phy); | |
1191 | if (ret) { | |
1192 | pr_err("failed to init %s PHY\n", dev->name); | |
1193 | return ret; | |
1194 | } | |
1195 | ||
1196 | ret = generic_phy_power_on(&pcie->phy); | |
1197 | if (ret) { | |
1198 | pr_err("power on %s PHY failed\n", dev->name); | |
1199 | goto err_exit_phy; | |
1200 | } | |
1201 | } | |
1202 | } | |
1203 | ||
1204 | iproc_pcie_reset(pcie); | |
1205 | ||
1206 | if (pcie->need_ob_cfg) { | |
1207 | ret = iproc_pcie_map_ranges(dev); | |
1208 | if (ret) { | |
1209 | pr_err("outbound map failed\n"); | |
1210 | goto err_power_off_phy; | |
1211 | } | |
1212 | } | |
1213 | ||
1214 | if (pcie->need_ib_cfg) { | |
1215 | ret = iproc_pcie_map_dma_ranges(pcie); | |
1216 | if (ret) { | |
1217 | pr_err("inbound map failed\n"); | |
1218 | goto err_power_off_phy; | |
1219 | } | |
1220 | } | |
1221 | ||
1222 | if (iproc_pcie_check_link(pcie)) | |
1223 | pr_info("no PCIe EP device detected\n"); | |
1224 | ||
1225 | return 0; | |
1226 | ||
1227 | err_power_off_phy: | |
1228 | generic_phy_power_off(&pcie->phy); | |
1229 | err_exit_phy: | |
1230 | generic_phy_exit(&pcie->phy); | |
1231 | return ret; | |
1232 | } | |
1233 | ||
1234 | static int iproc_pcie_remove(struct udevice *dev) | |
1235 | { | |
1236 | struct iproc_pcie *pcie = dev_get_priv(dev); | |
1237 | int ret; | |
1238 | ||
1239 | iproc_pcie_reset_map_regs(pcie); | |
1240 | ||
1241 | if (generic_phy_valid(&pcie->phy)) { | |
1242 | ret = generic_phy_power_off(&pcie->phy); | |
1243 | if (ret) { | |
1244 | pr_err("failed to power off PCIe phy\n"); | |
1245 | return ret; | |
1246 | } | |
1247 | ||
1248 | ret = generic_phy_exit(&pcie->phy); | |
1249 | if (ret) { | |
1250 | pr_err("failed to power off PCIe phy\n"); | |
1251 | return ret; | |
1252 | } | |
1253 | } | |
1254 | ||
1255 | return 0; | |
1256 | } | |
1257 | ||
/*
 * Device tree match table. The driver data carries the controller type
 * (enum iproc_pcie_type), which probe() reads via dev_get_driver_data().
 */
static const struct udevice_id pci_iproc_ids[] = {
	{ .compatible = "brcm,iproc-pcie-paxb-v2",
	  .data = IPROC_PCIE_PAXB_V2 },
	{ .compatible = "brcm,iproc-pcie-paxc-v2",
	  .data = IPROC_PCIE_PAXC_V2 },
	{ }
};
1265 | ||
/* U-Boot driver model registration for the iProc PCIe host controller */
U_BOOT_DRIVER(pci_iproc) = {
	.name = "pci_iproc",
	.id = UCLASS_PCI,
	.of_match = pci_iproc_ids,
	.ops = &iproc_pcie_ops,
	.probe = iproc_pcie_probe,
	.remove = iproc_pcie_remove,
	.priv_auto = sizeof(struct iproc_pcie),
	/* run remove() before handing control to the OS (DM_FLAG_OS_PREPARE) */
	.flags = DM_FLAG_OS_PREPARE,
};