Commit | Line | Data |
---|---|---|
83d290c5 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
ff3e077b SG |
2 | /* |
3 | * Copyright (c) 2014 Google, Inc | |
4 | * Written by Simon Glass <[email protected]> | |
ff3e077b SG |
5 | */ |
6 | ||
7 | #include <common.h> | |
8 | #include <dm.h> | |
9 | #include <errno.h> | |
691d719d | 10 | #include <init.h> |
f7ae49fc | 11 | #include <log.h> |
336d4615 | 12 | #include <malloc.h> |
ff3e077b | 13 | #include <pci.h> |
21d1fe7e | 14 | #include <asm/io.h> |
ff3e077b | 15 | #include <dm/device-internal.h> |
bf501595 | 16 | #include <dm/lists.h> |
348b744b | 17 | #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP) |
07f2f58b | 18 | #include <asm/fsp/fsp_support.h> |
348b744b | 19 | #endif |
c05ed00a | 20 | #include <linux/delay.h> |
5e23b8b4 | 21 | #include "pci_internal.h" |
ff3e077b SG |
22 | |
23 | DECLARE_GLOBAL_DATA_PTR; | |
24 | ||
a6eb93b3 | 25 | int pci_get_bus(int busnum, struct udevice **busp) |
983c6ba2 SG |
26 | { |
27 | int ret; | |
28 | ||
29 | ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); | |
30 | ||
31 | /* Since buses may not be numbered yet, try a little harder with bus 0 */ |
32 | if (ret == -ENODEV) { | |
3f603cbb | 33 | ret = uclass_first_device_err(UCLASS_PCI, busp); |
983c6ba2 SG |
34 | if (ret) |
35 | return ret; | |
983c6ba2 SG |
36 | ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); |
37 | } | |
38 | ||
39 | return ret; | |
40 | } | |
41 | ||
9f60fb0d SG |
42 | struct udevice *pci_get_controller(struct udevice *dev) |
43 | { | |
44 | while (device_is_on_pci_bus(dev)) | |
45 | dev = dev->parent; | |
46 | ||
47 | return dev; | |
48 | } | |
49 | ||
194fca91 | 50 | pci_dev_t dm_pci_get_bdf(const struct udevice *dev) |
4b515e4f | 51 | { |
8a8d24bd | 52 | struct pci_child_plat *pplat = dev_get_parent_plat(dev); |
4b515e4f SG |
53 | struct udevice *bus = dev->parent; |
54 | ||
4886287e SG |
55 | /* |
56 | * This error indicates that @dev is a device on an unprobed PCI bus. | |
57 | * The bus likely has bus->seq == -1, so the PCI_ADD_BUS() macro below |
58 | * will produce a bad BDF. |
59 | * | |
60 | * A common cause of this problem is that this function is called in the | |
d1998a9f | 61 | * of_to_plat() method of @dev. Accessing the PCI bus in that |
4886287e SG |
62 | * method is not allowed, since it has not yet been probed. To fix this, |
63 | * move that access to the probe() method of @dev instead. | |
64 | */ | |
65 | if (!device_active(bus)) | |
66 | log_err("PCI: Device '%s' on unprobed bus '%s'\n", dev->name, | |
67 | bus->name); | |
4b515e4f SG |
68 | return PCI_ADD_BUS(bus->seq, pplat->devfn); |
69 | } | |
70 | ||
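The comment above amounts to a how-to: a child device must defer config-space access until its parent bus has been probed. A minimal sketch of that pattern, assuming a hypothetical driver (all `demo_*` names, the `UCLASS_MISC` choice and the plat layout are invented, not part of this file):

```c
#include <common.h>
#include <dm.h>
#include <log.h>
#include <pci.h>

struct demo_plat {
	u16 vendor;
};

/* Read config space in probe(), not of_to_plat(): the parent bus is active here */
static int demo_probe(struct udevice *dev)
{
	struct demo_plat *plat = dev_get_plat(dev);

	dm_pci_read_config16(dev, PCI_VENDOR_ID, &plat->vendor);
	debug("%s: bdf=%x vendor=%04x\n", dev->name, dm_pci_get_bdf(dev),
	      plat->vendor);

	return 0;
}

U_BOOT_DRIVER(demo_pci_dev) = {
	.name		= "demo_pci_dev",
	.id		= UCLASS_MISC,
	.probe		= demo_probe,
	.plat_auto	= sizeof(struct demo_plat),
};
```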
ff3e077b SG |
71 | /** |
72 | * pci_get_bus_max() - returns the bus number of the last active bus | |
73 | * | |
74 | * @return last bus number, or -1 if no active buses | |
75 | */ | |
76 | static int pci_get_bus_max(void) | |
77 | { | |
78 | struct udevice *bus; | |
79 | struct uclass *uc; | |
80 | int ret = -1; | |
81 | ||
82 | ret = uclass_get(UCLASS_PCI, &uc); | |
83 | uclass_foreach_dev(bus, uc) { | |
84 | if (bus->seq > ret) | |
85 | ret = bus->seq; | |
86 | } | |
87 | ||
88 | debug("%s: ret=%d\n", __func__, ret); | |
89 | ||
90 | return ret; | |
91 | } | |
92 | ||
93 | int pci_last_busno(void) | |
94 | { | |
069155cb | 95 | return pci_get_bus_max(); |
ff3e077b SG |
96 | } |
97 | ||
98 | int pci_get_ff(enum pci_size_t size) | |
99 | { | |
100 | switch (size) { | |
101 | case PCI_SIZE_8: | |
102 | return 0xff; | |
103 | case PCI_SIZE_16: | |
104 | return 0xffff; | |
105 | default: | |
106 | return 0xffffffff; | |
107 | } | |
108 | } | |
109 | ||
02e4d38d MV |
110 | static void pci_dev_find_ofnode(struct udevice *bus, phys_addr_t bdf, |
111 | ofnode *rnode) | |
112 | { | |
113 | struct fdt_pci_addr addr; | |
114 | ofnode node; | |
115 | int ret; | |
116 | ||
117 | dev_for_each_subnode(node, bus) { | |
118 | ret = ofnode_read_pci_addr(node, FDT_PCI_SPACE_CONFIG, "reg", | |
119 | &addr); | |
120 | if (ret) | |
121 | continue; | |
122 | ||
123 | if (PCI_MASK_BUS(addr.phys_hi) != PCI_MASK_BUS(bdf)) | |
124 | continue; | |
125 | ||
126 | *rnode = node; | |
127 | break; | |
128 | } | |
129 | } |
130 | ||
c4e72c4a | 131 | int pci_bus_find_devfn(const struct udevice *bus, pci_dev_t find_devfn, |
ff3e077b SG |
132 | struct udevice **devp) |
133 | { | |
134 | struct udevice *dev; | |
135 | ||
136 | for (device_find_first_child(bus, &dev); | |
137 | dev; | |
138 | device_find_next_child(&dev)) { | |
8a8d24bd | 139 | struct pci_child_plat *pplat; |
ff3e077b | 140 | |
caa4daa2 | 141 | pplat = dev_get_parent_plat(dev); |
ff3e077b SG |
142 | if (pplat && pplat->devfn == find_devfn) { |
143 | *devp = dev; | |
144 | return 0; | |
145 | } | |
146 | } | |
147 | ||
148 | return -ENODEV; | |
149 | } | |
150 | ||
f3f1faef | 151 | int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp) |
ff3e077b SG |
152 | { |
153 | struct udevice *bus; | |
154 | int ret; | |
155 | ||
983c6ba2 | 156 | ret = pci_get_bus(PCI_BUS(bdf), &bus); |
ff3e077b SG |
157 | if (ret) |
158 | return ret; | |
159 | return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp); | |
160 | } | |
161 | ||
162 | static int pci_device_matches_ids(struct udevice *dev, | |
163 | struct pci_device_id *ids) | |
164 | { | |
8a8d24bd | 165 | struct pci_child_plat *pplat; |
ff3e077b SG |
166 | int i; |
167 | ||
caa4daa2 | 168 | pplat = dev_get_parent_plat(dev); |
ff3e077b SG |
169 | if (!pplat) |
170 | return -EINVAL; | |
171 | for (i = 0; ids[i].vendor != 0; i++) { | |
172 | if (pplat->vendor == ids[i].vendor && | |
173 | pplat->device == ids[i].device) | |
174 | return i; | |
175 | } | |
176 | ||
177 | return -EINVAL; | |
178 | } | |
179 | ||
180 | int pci_bus_find_devices(struct udevice *bus, struct pci_device_id *ids, | |
181 | int *indexp, struct udevice **devp) | |
182 | { | |
183 | struct udevice *dev; | |
184 | ||
185 | /* Scan all devices on this bus */ | |
186 | for (device_find_first_child(bus, &dev); | |
187 | dev; | |
188 | device_find_next_child(&dev)) { | |
189 | if (pci_device_matches_ids(dev, ids) >= 0) { | |
190 | if ((*indexp)-- <= 0) { | |
191 | *devp = dev; | |
192 | return 0; | |
193 | } | |
194 | } | |
195 | } | |
196 | ||
197 | return -ENODEV; | |
198 | } | |
199 | ||
200 | int pci_find_device_id(struct pci_device_id *ids, int index, | |
201 | struct udevice **devp) | |
202 | { | |
203 | struct udevice *bus; | |
204 | ||
205 | /* Scan all known buses */ | |
206 | for (uclass_first_device(UCLASS_PCI, &bus); | |
207 | bus; | |
208 | uclass_next_device(&bus)) { | |
209 | if (!pci_bus_find_devices(bus, ids, &index, devp)) | |
210 | return 0; | |
211 | } | |
212 | *devp = NULL; | |
213 | ||
214 | return -ENODEV; | |
215 | } | |
216 | ||
5c0bf647 SG |
217 | static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor, |
218 | unsigned int device, int *indexp, | |
219 | struct udevice **devp) | |
220 | { | |
8a8d24bd | 221 | struct pci_child_plat *pplat; |
5c0bf647 SG |
222 | struct udevice *dev; |
223 | ||
224 | for (device_find_first_child(bus, &dev); | |
225 | dev; | |
226 | device_find_next_child(&dev)) { | |
caa4daa2 | 227 | pplat = dev_get_parent_plat(dev); |
5c0bf647 SG |
228 | if (pplat->vendor == vendor && pplat->device == device) { |
229 | if (!(*indexp)--) { | |
230 | *devp = dev; | |
231 | return 0; | |
232 | } | |
233 | } | |
234 | } | |
235 | ||
236 | return -ENODEV; | |
237 | } | |
238 | ||
239 | int dm_pci_find_device(unsigned int vendor, unsigned int device, int index, | |
240 | struct udevice **devp) | |
241 | { | |
242 | struct udevice *bus; | |
243 | ||
244 | /* Scan all known buses */ | |
245 | for (uclass_first_device(UCLASS_PCI, &bus); | |
246 | bus; | |
247 | uclass_next_device(&bus)) { | |
248 | if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp)) | |
249 | return device_probe(*devp); | |
250 | } | |
251 | *devp = NULL; | |
252 | ||
253 | return -ENODEV; | |
254 | } | |
255 | ||
a0eb8356 SG |
256 | int dm_pci_find_class(uint find_class, int index, struct udevice **devp) |
257 | { | |
258 | struct udevice *dev; | |
259 | ||
260 | /* Scan all known buses */ | |
261 | for (pci_find_first_device(&dev); | |
262 | dev; | |
263 | pci_find_next_device(&dev)) { | |
8a8d24bd | 264 | struct pci_child_plat *pplat = dev_get_parent_plat(dev); |
a0eb8356 SG |
265 | |
266 | if (pplat->class == find_class && !index--) { | |
267 | *devp = dev; | |
268 | return device_probe(*devp); | |
269 | } | |
270 | } | |
271 | *devp = NULL; | |
272 | ||
273 | return -ENODEV; | |
274 | } | |
275 | ||
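As a usage note for the lookup helpers above, a caller can fetch (and implicitly probe) a device either by vendor/device pair or by class code. A hedged sketch (the `demo_` wrapper is invented; the class encoding mirrors the `PCI_CLASS_DISPLAY_VGA << 8` comparison used later in this file):

```c
#include <dm.h>
#include <pci.h>

/* Find and probe the first VGA-class device, if any */
static int demo_find_vga(struct udevice **devp)
{
	return dm_pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, 0, devp);
}
```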
ff3e077b SG |
276 | int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset, |
277 | unsigned long value, enum pci_size_t size) | |
278 | { | |
279 | struct dm_pci_ops *ops; | |
280 | ||
281 | ops = pci_get_ops(bus); | |
282 | if (!ops->write_config) | |
283 | return -ENOSYS; | |
284 | return ops->write_config(bus, bdf, offset, value, size); | |
285 | } | |
286 | ||
319dba1f SG |
287 | int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset, |
288 | u32 clr, u32 set) | |
289 | { | |
290 | ulong val; | |
291 | int ret; | |
292 | ||
293 | ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32); | |
294 | if (ret) | |
295 | return ret; | |
296 | val &= ~clr; | |
297 | val |= set; | |
298 | ||
299 | return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32); | |
300 | } | |
301 | ||
ff3e077b SG |
302 | int pci_write_config(pci_dev_t bdf, int offset, unsigned long value, |
303 | enum pci_size_t size) | |
304 | { | |
305 | struct udevice *bus; | |
306 | int ret; | |
307 | ||
983c6ba2 | 308 | ret = pci_get_bus(PCI_BUS(bdf), &bus); |
ff3e077b SG |
309 | if (ret) |
310 | return ret; | |
311 | ||
4d8615cb | 312 | return pci_bus_write_config(bus, bdf, offset, value, size); |
ff3e077b SG |
313 | } |
314 | ||
66afb4ed SG |
315 | int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value, |
316 | enum pci_size_t size) | |
317 | { | |
318 | struct udevice *bus; | |
319 | ||
1e0f2263 | 320 | for (bus = dev; device_is_on_pci_bus(bus);) |
66afb4ed | 321 | bus = bus->parent; |
21ccce1b SG |
322 | return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value, |
323 | size); | |
66afb4ed SG |
324 | } |
325 | ||
ff3e077b SG |
326 | int pci_write_config32(pci_dev_t bdf, int offset, u32 value) |
327 | { | |
328 | return pci_write_config(bdf, offset, value, PCI_SIZE_32); | |
329 | } | |
330 | ||
331 | int pci_write_config16(pci_dev_t bdf, int offset, u16 value) | |
332 | { | |
333 | return pci_write_config(bdf, offset, value, PCI_SIZE_16); | |
334 | } | |
335 | ||
336 | int pci_write_config8(pci_dev_t bdf, int offset, u8 value) | |
337 | { | |
338 | return pci_write_config(bdf, offset, value, PCI_SIZE_8); | |
339 | } | |
340 | ||
66afb4ed SG |
341 | int dm_pci_write_config8(struct udevice *dev, int offset, u8 value) |
342 | { | |
343 | return dm_pci_write_config(dev, offset, value, PCI_SIZE_8); | |
344 | } | |
345 | ||
346 | int dm_pci_write_config16(struct udevice *dev, int offset, u16 value) | |
347 | { | |
348 | return dm_pci_write_config(dev, offset, value, PCI_SIZE_16); | |
349 | } | |
350 | ||
351 | int dm_pci_write_config32(struct udevice *dev, int offset, u32 value) | |
352 | { | |
353 | return dm_pci_write_config(dev, offset, value, PCI_SIZE_32); | |
354 | } | |
355 | ||
194fca91 | 356 | int pci_bus_read_config(const struct udevice *bus, pci_dev_t bdf, int offset, |
ff3e077b SG |
357 | unsigned long *valuep, enum pci_size_t size) |
358 | { | |
359 | struct dm_pci_ops *ops; | |
360 | ||
361 | ops = pci_get_ops(bus); | |
362 | if (!ops->read_config) | |
363 | return -ENOSYS; | |
364 | return ops->read_config(bus, bdf, offset, valuep, size); | |
365 | } | |
366 | ||
367 | int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep, | |
368 | enum pci_size_t size) | |
369 | { | |
370 | struct udevice *bus; | |
371 | int ret; | |
372 | ||
983c6ba2 | 373 | ret = pci_get_bus(PCI_BUS(bdf), &bus); |
ff3e077b SG |
374 | if (ret) |
375 | return ret; | |
376 | ||
4d8615cb | 377 | return pci_bus_read_config(bus, bdf, offset, valuep, size); |
ff3e077b SG |
378 | } |
379 | ||
194fca91 SG |
380 | int dm_pci_read_config(const struct udevice *dev, int offset, |
381 | unsigned long *valuep, enum pci_size_t size) | |
66afb4ed | 382 | { |
194fca91 | 383 | const struct udevice *bus; |
66afb4ed | 384 | |
1e0f2263 | 385 | for (bus = dev; device_is_on_pci_bus(bus);) |
66afb4ed | 386 | bus = bus->parent; |
21ccce1b | 387 | return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep, |
66afb4ed SG |
388 | size); |
389 | } | |
390 | ||
ff3e077b SG |
391 | int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep) |
392 | { | |
393 | unsigned long value; | |
394 | int ret; | |
395 | ||
396 | ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32); | |
397 | if (ret) | |
398 | return ret; | |
399 | *valuep = value; | |
400 | ||
401 | return 0; | |
402 | } | |
403 | ||
404 | int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep) | |
405 | { | |
406 | unsigned long value; | |
407 | int ret; | |
408 | ||
409 | ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16); | |
410 | if (ret) | |
411 | return ret; | |
412 | *valuep = value; | |
413 | ||
414 | return 0; | |
415 | } | |
416 | ||
417 | int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep) | |
418 | { | |
419 | unsigned long value; | |
420 | int ret; | |
421 | ||
422 | ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8); | |
423 | if (ret) | |
424 | return ret; | |
425 | *valuep = value; | |
426 | ||
427 | return 0; | |
428 | } | |
429 | ||
194fca91 | 430 | int dm_pci_read_config8(const struct udevice *dev, int offset, u8 *valuep) |
66afb4ed SG |
431 | { |
432 | unsigned long value; | |
433 | int ret; | |
434 | ||
435 | ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8); | |
436 | if (ret) | |
437 | return ret; | |
438 | *valuep = value; | |
439 | ||
440 | return 0; | |
441 | } | |
442 | ||
194fca91 | 443 | int dm_pci_read_config16(const struct udevice *dev, int offset, u16 *valuep) |
66afb4ed SG |
444 | { |
445 | unsigned long value; | |
446 | int ret; | |
447 | ||
448 | ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16); | |
449 | if (ret) | |
450 | return ret; | |
451 | *valuep = value; | |
452 | ||
453 | return 0; | |
454 | } | |
455 | ||
194fca91 | 456 | int dm_pci_read_config32(const struct udevice *dev, int offset, u32 *valuep) |
66afb4ed SG |
457 | { |
458 | unsigned long value; | |
459 | int ret; | |
460 | ||
461 | ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32); | |
462 | if (ret) | |
463 | return ret; | |
464 | *valuep = value; | |
465 | ||
466 | return 0; | |
467 | } | |
468 | ||
319dba1f SG |
469 | int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set) |
470 | { | |
471 | u8 val; | |
472 | int ret; | |
473 | ||
474 | ret = dm_pci_read_config8(dev, offset, &val); | |
475 | if (ret) | |
476 | return ret; | |
477 | val &= ~clr; | |
478 | val |= set; | |
479 | ||
480 | return dm_pci_write_config8(dev, offset, val); | |
481 | } | |
482 | ||
483 | int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set) | |
484 | { | |
485 | u16 val; | |
486 | int ret; | |
487 | ||
488 | ret = dm_pci_read_config16(dev, offset, &val); | |
489 | if (ret) | |
490 | return ret; | |
491 | val &= ~clr; | |
492 | val |= set; | |
493 | ||
494 | return dm_pci_write_config16(dev, offset, val); | |
495 | } | |
496 | ||
497 | int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set) | |
498 | { | |
499 | u32 val; | |
500 | int ret; | |
501 | ||
502 | ret = dm_pci_read_config32(dev, offset, &val); | |
503 | if (ret) | |
504 | return ret; | |
505 | val &= ~clr; | |
506 | val |= set; | |
507 | ||
508 | return dm_pci_write_config32(dev, offset, val); | |
509 | } | |
510 | ||
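The three clear/set helpers above wrap a read-modify-write of a single config register. A minimal sketch of the typical use, assuming a hypothetical helper name (the PCI_COMMAND bits themselves are standard):

```c
#include <dm.h>
#include <pci.h>

/* Enable memory decode and bus mastering without disturbing other bits */
static int demo_enable_device(struct udevice *dev)
{
	return dm_pci_clrset_config16(dev, PCI_COMMAND, 0,
				      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
}
```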
bbbcb526 BM |
511 | static void set_vga_bridge_bits(struct udevice *dev) |
512 | { | |
513 | struct udevice *parent = dev->parent; | |
514 | u16 bc; | |
515 | ||
516 | while (parent->seq != 0) { | |
517 | dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc); | |
518 | bc |= PCI_BRIDGE_CTL_VGA; | |
519 | dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc); | |
520 | parent = parent->parent; | |
521 | } | |
522 | } | |
523 | ||
ff3e077b SG |
524 | int pci_auto_config_devices(struct udevice *bus) |
525 | { | |
526 | struct pci_controller *hose = bus->uclass_priv; | |
8a8d24bd | 527 | struct pci_child_plat *pplat; |
ff3e077b SG |
528 | unsigned int sub_bus; |
529 | struct udevice *dev; | |
530 | int ret; | |
531 | ||
532 | sub_bus = bus->seq; | |
533 | debug("%s: start\n", __func__); | |
534 | pciauto_config_init(hose); | |
535 | for (ret = device_find_first_child(bus, &dev); | |
536 | !ret && dev; | |
537 | ret = device_find_next_child(&dev)) { | |
ff3e077b | 538 | unsigned int max_bus; |
4d21455e | 539 | int ret; |
ff3e077b | 540 | |
ff3e077b | 541 | debug("%s: device %s\n", __func__, dev->name); |
f0c36928 SG |
542 | if (dev_of_valid(dev) && |
543 | dev_read_bool(dev, "pci,no-autoconfig")) | |
d8c7fb50 | 544 | continue; |
5e23b8b4 | 545 | ret = dm_pciauto_config_device(dev); |
4d21455e SG |
546 | if (ret < 0) |
547 | return ret; | |
548 | max_bus = ret; | |
ff3e077b | 549 | sub_bus = max(sub_bus, max_bus); |
bbbcb526 | 550 | |
caa4daa2 | 551 | pplat = dev_get_parent_plat(dev); |
bbbcb526 BM |
552 | if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8)) |
553 | set_vga_bridge_bits(dev); | |
ff3e077b SG |
554 | } |
555 | debug("%s: done\n", __func__); | |
556 | ||
557 | return sub_bus; | |
558 | } | |
559 | ||
badb9922 | 560 | int pci_generic_mmap_write_config( |
c4e72c4a SG |
561 | const struct udevice *bus, |
562 | int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset, | |
563 | void **addrp), | |
badb9922 TT |
564 | pci_dev_t bdf, |
565 | uint offset, | |
566 | ulong value, | |
567 | enum pci_size_t size) | |
568 | { | |
569 | void *address; | |
570 | ||
571 | if (addr_f(bus, bdf, offset, &address) < 0) | |
572 | return 0; | |
573 | ||
574 | switch (size) { | |
575 | case PCI_SIZE_8: | |
576 | writeb(value, address); | |
577 | return 0; | |
578 | case PCI_SIZE_16: | |
579 | writew(value, address); | |
580 | return 0; | |
581 | case PCI_SIZE_32: | |
582 | writel(value, address); | |
583 | return 0; | |
584 | default: | |
585 | return -EINVAL; | |
586 | } | |
587 | } | |
588 | ||
589 | int pci_generic_mmap_read_config( | |
c4e72c4a SG |
590 | const struct udevice *bus, |
591 | int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset, | |
592 | void **addrp), | |
badb9922 TT |
593 | pci_dev_t bdf, |
594 | uint offset, | |
595 | ulong *valuep, | |
596 | enum pci_size_t size) | |
597 | { | |
598 | void *address; | |
599 | ||
600 | if (addr_f(bus, bdf, offset, &address) < 0) { | |
601 | *valuep = pci_get_ff(size); | |
602 | return 0; | |
603 | } | |
604 | ||
605 | switch (size) { | |
606 | case PCI_SIZE_8: | |
607 | *valuep = readb(address); | |
608 | return 0; | |
609 | case PCI_SIZE_16: | |
610 | *valuep = readw(address); | |
611 | return 0; | |
612 | case PCI_SIZE_32: | |
613 | *valuep = readl(address); | |
614 | return 0; | |
615 | default: | |
616 | return -EINVAL; | |
617 | } | |
618 | } | |
619 | ||
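The two generic helpers above are intended to be wired into a controller driver's `dm_pci_ops` together with an address-translation callback. A hedged sketch for a hypothetical ECAM-mapped controller (every `demo_*` name and the priv layout are invented; only the helper and ops signatures come from this file):

```c
#include <dm.h>
#include <pci.h>

struct demo_pcie_priv {
	void *cfg_base;		/* base of the memory-mapped config space */
};

static int demo_ecam_addr(const struct udevice *bus, pci_dev_t bdf, uint offset,
			  void **addrp)
{
	struct demo_pcie_priv *priv = dev_get_priv(bus);

	/* Standard ECAM layout: bus[27:20], device[19:15], function[14:12] */
	*addrp = priv->cfg_base + (PCI_BUS(bdf) << 20) + (PCI_DEV(bdf) << 15) +
		 (PCI_FUNC(bdf) << 12) + offset;

	return 0;
}

static int demo_pcie_read_config(const struct udevice *bus, pci_dev_t bdf,
				 uint offset, ulong *valuep,
				 enum pci_size_t size)
{
	return pci_generic_mmap_read_config(bus, demo_ecam_addr, bdf, offset,
					    valuep, size);
}

static int demo_pcie_write_config(struct udevice *bus, pci_dev_t bdf,
				  uint offset, ulong value,
				  enum pci_size_t size)
{
	return pci_generic_mmap_write_config(bus, demo_ecam_addr, bdf, offset,
					     value, size);
}

/* In a real driver these ops would go into its U_BOOT_DRIVER, with priv
 * allocated via .priv_auto. */
static const struct dm_pci_ops demo_pcie_ops = {
	.read_config	= demo_pcie_read_config,
	.write_config	= demo_pcie_write_config,
};
```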
5e23b8b4 | 620 | int dm_pci_hose_probe_bus(struct udevice *bus) |
ff3e077b | 621 | { |
ff3e077b SG |
622 | int sub_bus; |
623 | int ret; | |
636cc177 SG |
624 | int ea_pos; |
625 | u8 reg; | |
ff3e077b SG |
626 | |
627 | debug("%s\n", __func__); | |
ff3e077b | 628 | |
636cc177 SG |
629 | ea_pos = dm_pci_find_capability(bus, PCI_CAP_ID_EA); |
630 | if (ea_pos) { | |
631 | dm_pci_read_config8(bus, ea_pos + sizeof(u32) + sizeof(u8), | |
632 | ®); | |
633 | sub_bus = reg; | |
634 | } else { | |
635 | sub_bus = pci_get_bus_max() + 1; | |
636 | } | |
ff3e077b | 637 | debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name); |
5e23b8b4 | 638 | dm_pciauto_prescan_setup_bridge(bus, sub_bus); |
ff3e077b SG |
639 | |
640 | ret = device_probe(bus); | |
641 | if (ret) { | |
3129ace4 | 642 | debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name, |
ff3e077b SG |
643 | ret); |
644 | return ret; | |
645 | } | |
636cc177 SG |
646 | |
647 | if (!ea_pos) { | |
648 | if (sub_bus != bus->seq) { | |
649 | debug("%s: Internal error, bus '%s' got seq %d, expected %d\n", | |
650 | __func__, bus->name, bus->seq, sub_bus); | |
651 | return -EPIPE; | |
652 | } | |
653 | sub_bus = pci_get_bus_max(); | |
ff3e077b | 654 | } |
5e23b8b4 | 655 | dm_pciauto_postscan_setup_bridge(bus, sub_bus); |
ff3e077b SG |
656 | |
657 | return sub_bus; | |
658 | } | |
659 | ||
aba92962 SG |
660 | /** |
661 | * pci_match_one_id() - Tell if a PCI device structure has a matching |
662 | * PCI device id structure | |
663 | * @id: single PCI device id structure to match | |
0367bd4d | 664 | * @find: the PCI device id structure to match against |
aba92962 | 665 | * |
0367bd4d HZ |
666 | * Returns true if @find matches @id (PCI_ANY_ID fields in @id act as |
667 | * wildcards), or false if there is no match. |
aba92962 SG |
668 | */ |
669 | static bool pci_match_one_id(const struct pci_device_id *id, | |
670 | const struct pci_device_id *find) | |
671 | { | |
672 | if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) && | |
673 | (id->device == PCI_ANY_ID || id->device == find->device) && | |
674 | (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) && | |
675 | (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) && | |
676 | !((id->class ^ find->class) & id->class_mask)) | |
677 | return true; | |
678 | ||
679 | return false; | |
680 | } | |
681 | ||
682 | /** | |
683 | * pci_find_and_bind_driver() - Find and bind the right PCI driver | |
684 | * | |
685 | * This only looks at certain fields in the descriptor. | |
5dbcf3a0 SG |
686 | * |
687 | * @parent: Parent bus | |
688 | * @find_id: Specification of the driver to find | |
689 | * @bdf: Bus/device/function address - see PCI_BDF() |
690 | * @devp: Returns a pointer to the device created | |
691 | * @return 0 if OK, -EPERM if the device is not needed before relocation and | |
692 | * therefore was not created, other -ve value on error | |
aba92962 SG |
693 | */ |
694 | static int pci_find_and_bind_driver(struct udevice *parent, | |
5dbcf3a0 SG |
695 | struct pci_device_id *find_id, |
696 | pci_dev_t bdf, struct udevice **devp) | |
aba92962 SG |
697 | { |
698 | struct pci_driver_entry *start, *entry; | |
02e4d38d | 699 | ofnode node = ofnode_null(); |
aba92962 SG |
700 | const char *drv; |
701 | int n_ents; | |
702 | int ret; | |
703 | char name[30], *str; | |
08fc7b8f | 704 | bool bridge; |
aba92962 SG |
705 | |
706 | *devp = NULL; | |
707 | ||
708 | debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__, | |
709 | find_id->vendor, find_id->device); | |
02e4d38d MV |
710 | |
711 | /* Determine optional OF node */ | |
bc30140d SG |
712 | if (ofnode_valid(dev_ofnode(parent))) |
713 | pci_dev_find_ofnode(parent, bdf, &node); | |
02e4d38d | 714 | |
a6cd597a MW |
715 | if (ofnode_valid(node) && !ofnode_is_available(node)) { |
716 | debug("%s: Ignoring disabled device\n", __func__); | |
717 | return -EPERM; | |
718 | } | |
719 | ||
aba92962 SG |
720 | start = ll_entry_start(struct pci_driver_entry, pci_driver_entry); |
721 | n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry); | |
722 | for (entry = start; entry != start + n_ents; entry++) { | |
723 | const struct pci_device_id *id; | |
724 | struct udevice *dev; | |
725 | const struct driver *drv; | |
726 | ||
727 | for (id = entry->match; | |
728 | id->vendor || id->subvendor || id->class_mask; | |
729 | id++) { | |
730 | if (!pci_match_one_id(id, find_id)) | |
731 | continue; | |
732 | ||
733 | drv = entry->driver; | |
08fc7b8f BM |
734 | |
735 | /* | |
736 | * In the pre-relocation phase, we only bind devices | |
737 | * whose driver has the DM_FLAG_PRE_RELOC set, to save | |
738 | * precious memory space, as on some platforms that |
739 | * space is pretty limited (i.e. when using Cache As RAM). |
740 | */ | |
741 | if (!(gd->flags & GD_FLG_RELOC) && | |
742 | !(drv->flags & DM_FLAG_PRE_RELOC)) | |
5dbcf3a0 | 743 | return -EPERM; |
08fc7b8f | 744 | |
aba92962 SG |
745 | /* |
746 | * We could pass the descriptor to the driver as | |
caa4daa2 | 747 | * plat (instead of NULL) and allow its bind() |
aba92962 SG |
748 | * method to return -ENOENT if it doesn't support this |
749 | * device. That way we could continue the search to | |
750 | * find another driver. For now this doesn't seem | |
751 | * necessary, so just bind the first match. |
752 | */ | |
734206dd SG |
753 | ret = device_bind(parent, drv, drv->name, NULL, node, |
754 | &dev); | |
aba92962 SG |
755 | if (ret) |
756 | goto error; | |
757 | debug("%s: Match found: %s\n", __func__, drv->name); | |
ed698aa7 | 758 | dev->driver_data = id->driver_data; |
aba92962 SG |
759 | *devp = dev; |
760 | return 0; | |
761 | } | |
762 | } | |
763 | ||
08fc7b8f BM |
764 | bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI; |
765 | /* | |
766 | * In the pre-relocation phase, we only bind bridge devices to save | |
767 | * precious memory space, as on some platforms that space is pretty |
768 | * limited (i.e. when using Cache As RAM). |
769 | */ | |
770 | if (!(gd->flags & GD_FLG_RELOC) && !bridge) | |
5dbcf3a0 | 771 | return -EPERM; |
08fc7b8f | 772 | |
aba92962 | 773 | /* Bind a generic driver so that the device can be used */ |
4d8615cb BM |
774 | sprintf(name, "pci_%x:%x.%x", parent->seq, PCI_DEV(bdf), |
775 | PCI_FUNC(bdf)); | |
aba92962 SG |
776 | str = strdup(name); |
777 | if (!str) | |
778 | return -ENOMEM; | |
08fc7b8f BM |
779 | drv = bridge ? "pci_bridge_drv" : "pci_generic_drv"; |
780 | ||
02e4d38d | 781 | ret = device_bind_driver_to_node(parent, drv, str, node, devp); |
aba92962 | 782 | if (ret) { |
3129ace4 | 783 | debug("%s: Failed to bind generic driver: %d\n", __func__, ret); |
c42640c7 | 784 | free(str); |
aba92962 SG |
785 | return ret; |
786 | } | |
787 | debug("%s: No match found: bound generic driver instead\n", __func__); | |
788 | ||
789 | return 0; | |
790 | ||
791 | error: | |
792 | debug("%s: No match found: error %d\n", __func__, ret); | |
793 | return ret; | |
794 | } | |
795 | ||
ff3e077b SG |
796 | int pci_bind_bus_devices(struct udevice *bus) |
797 | { | |
798 | ulong vendor, device; | |
799 | ulong header_type; | |
4d8615cb | 800 | pci_dev_t bdf, end; |
ff3e077b | 801 | bool found_multi; |
a3fac3f3 | 802 | int ari_off; |
ff3e077b SG |
803 | int ret; |
804 | ||
805 | found_multi = false; | |
4d8615cb BM |
806 | end = PCI_BDF(bus->seq, PCI_MAX_PCI_DEVICES - 1, |
807 | PCI_MAX_PCI_FUNCTIONS - 1); | |
6d9f5b03 | 808 | for (bdf = PCI_BDF(bus->seq, 0, 0); bdf <= end; |
4d8615cb | 809 | bdf += PCI_BDF(0, 0, 1)) { |
8a8d24bd | 810 | struct pci_child_plat *pplat; |
ff3e077b SG |
811 | struct udevice *dev; |
812 | ulong class; | |
813 | ||
64e45f73 BM |
814 | if (!PCI_FUNC(bdf)) |
815 | found_multi = false; | |
4d8615cb | 816 | if (PCI_FUNC(bdf) && !found_multi) |
ff3e077b | 817 | continue; |
2a87f7fd | 818 | |
ff3e077b | 819 | /* Check only the first access, we don't expect problems */ |
2a87f7fd HZ |
820 | ret = pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor, |
821 | PCI_SIZE_16); | |
ff3e077b SG |
822 | if (ret) |
823 | goto error; | |
2a87f7fd | 824 | |
ff3e077b SG |
825 | if (vendor == 0xffff || vendor == 0x0000) |
826 | continue; | |
827 | ||
2a87f7fd HZ |
828 | pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE, |
829 | &header_type, PCI_SIZE_8); | |
830 | ||
4d8615cb | 831 | if (!PCI_FUNC(bdf)) |
ff3e077b SG |
832 | found_multi = header_type & 0x80; |
833 | ||
0911569b | 834 | debug("%s: bus %d/%s: found device %x, function %d", __func__, |
4d8615cb BM |
835 | bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf)); |
836 | pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device, | |
ff3e077b | 837 | PCI_SIZE_16); |
4d8615cb | 838 | pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class, |
aba92962 SG |
839 | PCI_SIZE_32); |
840 | class >>= 8; | |
ff3e077b SG |
841 | |
842 | /* Find this device in the device tree */ | |
4d8615cb | 843 | ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev); |
0911569b | 844 | debug(": find ret=%d\n", ret); |
ff3e077b | 845 | |
8bd42525 | 846 | /* If nothing in the device tree, bind a device */ |
ff3e077b | 847 | if (ret == -ENODEV) { |
aba92962 SG |
848 | struct pci_device_id find_id; |
849 | ulong val; | |
850 | ||
851 | memset(&find_id, '\0', sizeof(find_id)); | |
852 | find_id.vendor = vendor; | |
853 | find_id.device = device; | |
854 | find_id.class = class; | |
855 | if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) { | |
4d8615cb | 856 | pci_bus_read_config(bus, bdf, |
aba92962 SG |
857 | PCI_SUBSYSTEM_VENDOR_ID, |
858 | &val, PCI_SIZE_32); | |
859 | find_id.subvendor = val & 0xffff; | |
860 | find_id.subdevice = val >> 16; | |
861 | } | |
4d8615cb | 862 | ret = pci_find_and_bind_driver(bus, &find_id, bdf, |
aba92962 | 863 | &dev); |
ff3e077b | 864 | } |
5dbcf3a0 SG |
865 | if (ret == -EPERM) |
866 | continue; | |
867 | else if (ret) | |
ff3e077b SG |
868 | return ret; |
869 | ||
870 | /* Update the platform data */ | |
caa4daa2 | 871 | pplat = dev_get_parent_plat(dev); |
5dbcf3a0 SG |
872 | pplat->devfn = PCI_MASK_BUS(bdf); |
873 | pplat->vendor = vendor; | |
874 | pplat->device = device; | |
875 | pplat->class = class; | |
a3fac3f3 SG |
876 | |
877 | if (IS_ENABLED(CONFIG_PCI_ARID)) { | |
878 | ari_off = dm_pci_find_ext_capability(dev, | |
879 | PCI_EXT_CAP_ID_ARI); | |
880 | if (ari_off) { | |
881 | u16 ari_cap; | |
882 | ||
883 | /* | |
884 | * Read Next Function number in ARI Cap | |
885 | * Register | |
886 | */ | |
887 | dm_pci_read_config16(dev, ari_off + 4, | |
888 | &ari_cap); | |
889 | /* | |
890 | * Update next scan on this function number, | |
891 | * subtract 1 in BDF to satisfy loop increment. | |
892 | */ | |
893 | if (ari_cap & 0xff00) { | |
894 | bdf = PCI_BDF(PCI_BUS(bdf), | |
895 | PCI_DEV(ari_cap), | |
896 | PCI_FUNC(ari_cap)); | |
897 | bdf = bdf - 0x100; | |
898 | } | |
899 | } | |
900 | } | |
ff3e077b SG |
901 | } |
902 | ||
903 | return 0; | |
904 | error: | |
905 | printf("Cannot read bus configuration: %d\n", ret); | |
906 | ||
907 | return ret; | |
908 | } | |
909 | ||
f2825f6e CG |
910 | static void decode_regions(struct pci_controller *hose, ofnode parent_node, |
911 | ofnode node) | |
ff3e077b SG |
912 | { |
913 | int pci_addr_cells, addr_cells, size_cells; | |
914 | int cells_per_record; | |
dfaf6a57 | 915 | struct bd_info *bd; |
ff3e077b | 916 | const u32 *prop; |
e0024741 | 917 | int max_regions; |
ff3e077b SG |
918 | int len; |
919 | int i; | |
920 | ||
61e51bab | 921 | prop = ofnode_get_property(node, "ranges", &len); |
f2825f6e CG |
922 | if (!prop) { |
923 | debug("%s: Cannot decode regions\n", __func__); | |
924 | return; | |
925 | } | |
926 | ||
878d68c0 SG |
927 | pci_addr_cells = ofnode_read_simple_addr_cells(node); |
928 | addr_cells = ofnode_read_simple_addr_cells(parent_node); | |
929 | size_cells = ofnode_read_simple_size_cells(node); | |
ff3e077b SG |
930 | |
931 | /* PCI addresses are always 3-cells */ | |
932 | len /= sizeof(u32); | |
933 | cells_per_record = pci_addr_cells + addr_cells + size_cells; | |
934 | hose->region_count = 0; | |
935 | debug("%s: len=%d, cells_per_record=%d\n", __func__, len, | |
936 | cells_per_record); | |
e0024741 SR |
937 | |
938 | /* Dynamically allocate the regions array */ | |
939 | max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS; | |
940 | hose->regions = (struct pci_region *) | |
941 | calloc(1, max_regions * sizeof(struct pci_region)); | |
942 | ||
943 | for (i = 0; i < max_regions; i++, len -= cells_per_record) { | |
ff3e077b SG |
944 | u64 pci_addr, addr, size; |
945 | int space_code; | |
946 | u32 flags; | |
947 | int type; | |
9526d83a | 948 | int pos; |
ff3e077b SG |
949 | |
950 | if (len < cells_per_record) | |
951 | break; | |
952 | flags = fdt32_to_cpu(prop[0]); | |
953 | space_code = (flags >> 24) & 3; | |
954 | pci_addr = fdtdec_get_number(prop + 1, 2); | |
955 | prop += pci_addr_cells; | |
956 | addr = fdtdec_get_number(prop, addr_cells); | |
957 | prop += addr_cells; | |
958 | size = fdtdec_get_number(prop, size_cells); | |
959 | prop += size_cells; | |
dee37fc9 MY |
960 | debug("%s: region %d, pci_addr=%llx, addr=%llx, size=%llx, space_code=%d\n", |
961 | __func__, hose->region_count, pci_addr, addr, size, space_code); | |
ff3e077b SG |
962 | if (space_code & 2) { |
963 | type = flags & (1U << 30) ? PCI_REGION_PREFETCH : | |
964 | PCI_REGION_MEM; | |
965 | } else if (space_code & 1) { | |
966 | type = PCI_REGION_IO; | |
967 | } else { | |
968 | continue; | |
969 | } | |
52ba9073 TT |
970 | |
971 | if (!IS_ENABLED(CONFIG_SYS_PCI_64BIT) && | |
972 | type == PCI_REGION_MEM && upper_32_bits(pci_addr)) { | |
973 | debug(" - beyond the 32-bit boundary, ignoring\n"); | |
974 | continue; | |
975 | } | |
976 | ||
9526d83a | 977 | pos = -1; |
4cf56ec0 SG |
978 | if (!IS_ENABLED(CONFIG_PCI_REGION_MULTI_ENTRY)) { |
979 | for (i = 0; i < hose->region_count; i++) { | |
980 | if (hose->regions[i].flags == type) | |
981 | pos = i; | |
982 | } | |
9526d83a | 983 | } |
4cf56ec0 | 984 | |
9526d83a SG |
985 | if (pos == -1) |
986 | pos = hose->region_count++; | |
987 | debug(" - type=%d, pos=%d\n", type, pos); | |
988 | pci_set_region(hose->regions + pos, pci_addr, addr, size, type); | |
ff3e077b SG |
989 | } |
990 | ||
991 | /* Add a region for our local memory */ | |
dfaf6a57 | 992 | bd = gd->bd; |
1eaf7800 | 993 | if (!bd) |
f2825f6e | 994 | return; |
1eaf7800 | 995 | |
664758c3 BM |
996 | for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) { |
997 | if (bd->bi_dram[i].size) { | |
998 | pci_set_region(hose->regions + hose->region_count++, | |
999 | bd->bi_dram[i].start, | |
1000 | bd->bi_dram[i].start, | |
1001 | bd->bi_dram[i].size, | |
1002 | PCI_REGION_MEM | PCI_REGION_SYS_MEMORY); | |
1003 | } | |
1004 | } | |
ff3e077b | 1005 | |
f2825f6e | 1006 | return; |
ff3e077b SG |
1007 | } |
1008 | ||
1009 | static int pci_uclass_pre_probe(struct udevice *bus) | |
1010 | { | |
1011 | struct pci_controller *hose; | |
ff3e077b SG |
1012 | |
1013 | debug("%s, bus=%d/%s, parent=%s\n", __func__, bus->seq, bus->name, | |
1014 | bus->parent->name); | |
1015 | hose = bus->uclass_priv; | |
1016 | ||
1017 | /* For bridges, use the top-level PCI controller */ | |
65f62b1c | 1018 | if (!device_is_on_pci_bus(bus)) { |
ff3e077b | 1019 | hose->ctlr = bus; |
f2825f6e | 1020 | decode_regions(hose, dev_ofnode(bus->parent), dev_ofnode(bus)); |
ff3e077b SG |
1021 | } else { |
1022 | struct pci_controller *parent_hose; | |
1023 | ||
1024 | parent_hose = dev_get_uclass_priv(bus->parent); | |
1025 | hose->ctlr = parent_hose->bus; | |
1026 | } | |
1027 | hose->bus = bus; | |
1028 | hose->first_busno = bus->seq; | |
1029 | hose->last_busno = bus->seq; | |
f0c36928 SG |
1030 | if (dev_of_valid(bus)) { |
1031 | hose->skip_auto_config_until_reloc = | |
1032 | dev_read_bool(bus, | |
1033 | "u-boot,skip-auto-config-until-reloc"); | |
1034 | } | |
ff3e077b SG |
1035 | |
1036 | return 0; | |
1037 | } | |
1038 | ||
1039 | static int pci_uclass_post_probe(struct udevice *bus) | |
1040 | { | |
2206ac24 | 1041 | struct pci_controller *hose = dev_get_uclass_priv(bus); |
ff3e077b SG |
1042 | int ret; |
1043 | ||
ff3e077b SG |
1044 | debug("%s: probing bus %d\n", __func__, bus->seq); |
1045 | ret = pci_bind_bus_devices(bus); | |
1046 | if (ret) | |
1047 | return ret; | |
1048 | ||
f1f44382 | 1049 | if (CONFIG_IS_ENABLED(PCI_PNP) && ll_boot_init() && |
2206ac24 SG |
1050 | (!hose->skip_auto_config_until_reloc || |
1051 | (gd->flags & GD_FLG_RELOC))) { | |
1052 | ret = pci_auto_config_devices(bus); | |
1053 | if (ret < 0) | |
1054 | return log_msg_ret("pci auto-config", ret); | |
1055 | } | |
ff3e077b | 1056 | |
348b744b BM |
1057 | #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP) |
1058 | /* | |
1059 | * Per Intel FSP specification, we should call FSP notify API to | |
1060 | * inform FSP that PCI enumeration has been done so that FSP will | |
1061 | * do any necessary initialization as required by the chipset's | |
1062 | * BIOS Writer's Guide (BWG). | |
1063 | * | |
1064 | * Unfortunately we have to put this call here as with driver model, | |
1065 | * the enumeration is all done on a lazy basis as needed, so until | |
1066 | * something is touched on PCI it won't happen. | |
1067 | * | |
1068 | * Note we only call this 1) after U-Boot is relocated, and 2) | |
1069 | * root bus has finished probing. | |
1070 | */ | |
f1f44382 | 1071 | if ((gd->flags & GD_FLG_RELOC) && bus->seq == 0 && ll_boot_init()) { |
348b744b | 1072 | ret = fsp_init_phase_pci(); |
4d21455e SG |
1073 | if (ret) |
1074 | return ret; | |
1075 | } | |
348b744b BM |
1076 | #endif |
1077 | ||
4d21455e | 1078 | return 0; |
ff3e077b SG |
1079 | } |
1080 | ||
1081 | static int pci_uclass_child_post_bind(struct udevice *dev) | |
1082 | { | |
8a8d24bd | 1083 | struct pci_child_plat *pplat; |
ff3e077b | 1084 | |
bf501595 | 1085 | if (!dev_of_valid(dev)) |
ff3e077b SG |
1086 | return 0; |
1087 | ||
caa4daa2 | 1088 | pplat = dev_get_parent_plat(dev); |
1f6b08b9 BM |
1089 | |
1090 | /* Extract vendor id and device id if available */ | |
1091 | ofnode_read_pci_vendev(dev_ofnode(dev), &pplat->vendor, &pplat->device); | |
1092 | ||
1093 | /* Extract the devfn from fdt_pci_addr */ | |
b5214200 | 1094 | pplat->devfn = pci_get_devfn(dev); |
ff3e077b SG |
1095 | |
1096 | return 0; | |
1097 | } | |
1098 | ||
c4e72c4a | 1099 | static int pci_bridge_read_config(const struct udevice *bus, pci_dev_t bdf, |
4d8615cb BM |
1100 | uint offset, ulong *valuep, |
1101 | enum pci_size_t size) | |
ff3e077b SG |
1102 | { |
1103 | struct pci_controller *hose = bus->uclass_priv; | |
ff3e077b SG |
1104 | |
1105 | return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size); | |
1106 | } | |
1107 | ||
4d8615cb BM |
1108 | static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf, |
1109 | uint offset, ulong value, | |
1110 | enum pci_size_t size) | |
ff3e077b SG |
1111 | { |
1112 | struct pci_controller *hose = bus->uclass_priv; | |
ff3e077b SG |
1113 | |
1114 | return pci_bus_write_config(hose->ctlr, bdf, offset, value, size); | |
1115 | } | |
1116 | ||
76c3fbcd SG |
1117 | static int skip_to_next_device(struct udevice *bus, struct udevice **devp) |
1118 | { | |
1119 | struct udevice *dev; | |
1120 | int ret = 0; | |
1121 | ||
1122 | /* | |
1123 | * Scan through all the PCI controllers. On x86 there will only be one | |
1124 | * but that is not necessarily true on other hardware. | |
1125 | */ | |
1126 | do { | |
1127 | device_find_first_child(bus, &dev); | |
1128 | if (dev) { | |
1129 | *devp = dev; | |
1130 | return 0; | |
1131 | } | |
1132 | ret = uclass_next_device(&bus); | |
1133 | if (ret) | |
1134 | return ret; | |
1135 | } while (bus); | |
1136 | ||
1137 | return 0; | |
1138 | } | |
1139 | ||
1140 | int pci_find_next_device(struct udevice **devp) | |
1141 | { | |
1142 | struct udevice *child = *devp; | |
1143 | struct udevice *bus = child->parent; | |
1144 | int ret; | |
1145 | ||
1146 | /* First try all the siblings */ | |
1147 | *devp = NULL; | |
1148 | while (child) { | |
1149 | device_find_next_child(&child); | |
1150 | if (child) { | |
1151 | *devp = child; | |
1152 | return 0; | |
1153 | } | |
1154 | } | |
1155 | ||
1156 | /* We ran out of siblings. Try the next bus */ | |
1157 | ret = uclass_next_device(&bus); | |
1158 | if (ret) | |
1159 | return ret; | |
1160 | ||
1161 | return bus ? skip_to_next_device(bus, devp) : 0; | |
1162 | } | |
1163 | ||
1164 | int pci_find_first_device(struct udevice **devp) | |
1165 | { | |
1166 | struct udevice *bus; | |
1167 | int ret; | |
1168 | ||
1169 | *devp = NULL; | |
1170 | ret = uclass_first_device(UCLASS_PCI, &bus); | |
1171 | if (ret) | |
1172 | return ret; | |
1173 | ||
1174 | return skip_to_next_device(bus, devp); | |
1175 | } | |
1176 | ||
9289db6c SG |
1177 | ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size) |
1178 | { | |
1179 | switch (size) { | |
1180 | case PCI_SIZE_8: | |
1181 | return (value >> ((offset & 3) * 8)) & 0xff; | |
1182 | case PCI_SIZE_16: | |
1183 | return (value >> ((offset & 2) * 8)) & 0xffff; | |
1184 | default: | |
1185 | return value; | |
1186 | } | |
1187 | } | |
1188 | ||
1189 | ulong pci_conv_size_to_32(ulong old, ulong value, uint offset, | |
1190 | enum pci_size_t size) | |
1191 | { | |
1192 | uint off_mask; | |
1193 | uint val_mask, shift; | |
1194 | ulong ldata, mask; | |
1195 | ||
1196 | switch (size) { | |
1197 | case PCI_SIZE_8: | |
1198 | off_mask = 3; | |
1199 | val_mask = 0xff; | |
1200 | break; | |
1201 | case PCI_SIZE_16: | |
1202 | off_mask = 2; | |
1203 | val_mask = 0xffff; | |
1204 | break; | |
1205 | default: | |
1206 | return value; | |
1207 | } | |
1208 | shift = (offset & off_mask) * 8; | |
1209 | ldata = (value & val_mask) << shift; | |
1210 | mask = val_mask << shift; | |
1211 | value = (old & ~mask) | ldata; | |
1212 | ||
1213 | return value; | |
1214 | } | |
1215 | ||
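A worked example may help confirm the shifting and masking above; the values are chosen purely for illustration and the check function is hypothetical:

```c
#include <log.h>
#include <pci.h>

static void demo_conv_check(void)
{
	/* Extract the 16-bit field at offset 2 of dword 0xaabbccdd -> 0xaabb */
	assert(pci_conv_32_to_size(0xaabbccdd, 2, PCI_SIZE_16) == 0xaabb);

	/* Merge a 16-bit write of 0x1234 at offset 2 back into that dword */
	assert(pci_conv_size_to_32(0xaabbccdd, 0x1234, 2, PCI_SIZE_16) ==
	       0x1234ccdd);
}
```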
143eb5b1 RK |
1216 | int pci_get_dma_regions(struct udevice *dev, struct pci_region *memp, int index) |
1217 | { | |
1218 | int pci_addr_cells, addr_cells, size_cells; | |
1219 | int cells_per_record; | |
1220 | const u32 *prop; | |
1221 | int len; | |
1222 | int i = 0; | |
1223 | ||
1224 | prop = ofnode_get_property(dev_ofnode(dev), "dma-ranges", &len); | |
1225 | if (!prop) { | |
1226 | log_err("PCI: Device '%s': Cannot decode dma-ranges\n", | |
1227 | dev->name); | |
1228 | return -EINVAL; | |
1229 | } | |
1230 | ||
1231 | pci_addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev)); | |
1232 | addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev->parent)); | |
1233 | size_cells = ofnode_read_simple_size_cells(dev_ofnode(dev)); | |
1234 | ||
1235 | /* PCI addresses are always 3-cells */ | |
1236 | len /= sizeof(u32); | |
1237 | cells_per_record = pci_addr_cells + addr_cells + size_cells; | |
1238 | debug("%s: len=%d, cells_per_record=%d\n", __func__, len, | |
1239 | cells_per_record); | |
1240 | ||
1241 | while (len) { | |
1242 | memp->bus_start = fdtdec_get_number(prop + 1, 2); | |
1243 | prop += pci_addr_cells; | |
1244 | memp->phys_start = fdtdec_get_number(prop, addr_cells); | |
1245 | prop += addr_cells; | |
1246 | memp->size = fdtdec_get_number(prop, size_cells); | |
1247 | prop += size_cells; | |
1248 | ||
1249 | if (i == index) | |
1250 | return 0; | |
1251 | i++; | |
1252 | len -= cells_per_record; | |
1253 | } | |
1254 | ||
1255 | return -EINVAL; | |
1256 | } | |
1257 | ||
f9260336 SG |
1258 | int pci_get_regions(struct udevice *dev, struct pci_region **iop, |
1259 | struct pci_region **memp, struct pci_region **prefp) | |
1260 | { | |
1261 | struct udevice *bus = pci_get_controller(dev); | |
1262 | struct pci_controller *hose = dev_get_uclass_priv(bus); | |
1263 | int i; | |
1264 | ||
1265 | *iop = NULL; | |
1266 | *memp = NULL; | |
1267 | *prefp = NULL; | |
1268 | for (i = 0; i < hose->region_count; i++) { | |
1269 | switch (hose->regions[i].flags) { | |
1270 | case PCI_REGION_IO: | |
1271 | if (!*iop || (*iop)->size < hose->regions[i].size) | |
1272 | *iop = hose->regions + i; | |
1273 | break; | |
1274 | case PCI_REGION_MEM: | |
1275 | if (!*memp || (*memp)->size < hose->regions[i].size) | |
1276 | *memp = hose->regions + i; | |
1277 | break; | |
1278 | case (PCI_REGION_MEM | PCI_REGION_PREFETCH): | |
1279 | if (!*prefp || (*prefp)->size < hose->regions[i].size) | |
1280 | *prefp = hose->regions + i; | |
1281 | break; | |
1282 | } | |
1283 | } | |
1284 | ||
1285 | return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL); | |
1286 | } | |
1287 | ||
194fca91 | 1288 | u32 dm_pci_read_bar32(const struct udevice *dev, int barnum) |
bab17cf1 SG |
1289 | { |
1290 | u32 addr; | |
1291 | int bar; | |
1292 | ||
1293 | bar = PCI_BASE_ADDRESS_0 + barnum * 4; | |
1294 | dm_pci_read_config32(dev, bar, &addr); | |
9ece4b09 SG |
1295 | |
1296 | /* | |
1297 | * If we get an invalid address, return this so that comparisons with | |
1298 | * FDT_ADDR_T_NONE work correctly | |
1299 | */ | |
1300 | if (addr == 0xffffffff) | |
1301 | return addr; | |
1302 | else if (addr & PCI_BASE_ADDRESS_SPACE_IO) | |
bab17cf1 SG |
1303 | return addr & PCI_BASE_ADDRESS_IO_MASK; |
1304 | else | |
1305 | return addr & PCI_BASE_ADDRESS_MEM_MASK; | |
1306 | } | |
1307 | ||
9d731c82 SG |
1308 | void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr) |
1309 | { | |
1310 | int bar; | |
1311 | ||
1312 | bar = PCI_BASE_ADDRESS_0 + barnum * 4; | |
1313 | dm_pci_write_config32(dev, bar, addr); | |
1314 | } | |
1315 | ||
21d1fe7e SG |
1316 | static int _dm_pci_bus_to_phys(struct udevice *ctlr, |
1317 | pci_addr_t bus_addr, unsigned long flags, | |
1318 | unsigned long skip_mask, phys_addr_t *pa) | |
1319 | { | |
1320 | struct pci_controller *hose = dev_get_uclass_priv(ctlr); | |
1321 | struct pci_region *res; | |
1322 | int i; | |
1323 | ||
6f95d89c CG |
1324 | if (hose->region_count == 0) { |
1325 | *pa = bus_addr; | |
1326 | return 0; | |
1327 | } | |
1328 | ||
21d1fe7e SG |
1329 | for (i = 0; i < hose->region_count; i++) { |
1330 | res = &hose->regions[i]; | |
1331 | ||
1332 | if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) | |
1333 | continue; | |
1334 | ||
1335 | if (res->flags & skip_mask) | |
1336 | continue; | |
1337 | ||
1338 | if (bus_addr >= res->bus_start && | |
1339 | (bus_addr - res->bus_start) < res->size) { | |
1340 | *pa = (bus_addr - res->bus_start + res->phys_start); | |
1341 | return 0; | |
1342 | } | |
1343 | } | |
1344 | ||
1345 | return 1; | |
1346 | } | |
1347 | ||
1348 | phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr, | |
1349 | unsigned long flags) | |
1350 | { | |
1351 | phys_addr_t phys_addr = 0; | |
1352 | struct udevice *ctlr; | |
1353 | int ret; | |
1354 | ||
1355 | /* The root controller has the region information */ | |
1356 | ctlr = pci_get_controller(dev); | |
1357 | ||
1358 | /* | |
1359 | * if PCI_REGION_MEM is set we do a two pass search with preference | |
1360 | * on matches that don't have PCI_REGION_SYS_MEMORY set | |
1361 | */ | |
1362 | if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { | |
1363 | ret = _dm_pci_bus_to_phys(ctlr, bus_addr, | |
1364 | flags, PCI_REGION_SYS_MEMORY, | |
1365 | &phys_addr); | |
1366 | if (!ret) | |
1367 | return phys_addr; | |
1368 | } | |
1369 | ||
1370 | ret = _dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr); | |
1371 | ||
1372 | if (ret) | |
1373 | puts("pci_hose_bus_to_phys: invalid physical address\n"); | |
1374 | ||
1375 | return phys_addr; | |
1376 | } | |
1377 | ||
1378 | int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr, | |
1379 | unsigned long flags, unsigned long skip_mask, | |
1380 | pci_addr_t *ba) | |
1381 | { | |
1382 | struct pci_region *res; | |
1383 | struct udevice *ctlr; | |
1384 | pci_addr_t bus_addr; | |
1385 | int i; | |
1386 | struct pci_controller *hose; | |
1387 | ||
1388 | /* The root controller has the region information */ | |
1389 | ctlr = pci_get_controller(dev); | |
1390 | hose = dev_get_uclass_priv(ctlr); | |
1391 | ||
6f95d89c CG |
1392 | if (hose->region_count == 0) { |
1393 | *ba = phys_addr; | |
1394 | return 0; | |
1395 | } | |
1396 | ||
21d1fe7e SG |
1397 | for (i = 0; i < hose->region_count; i++) { |
1398 | res = &hose->regions[i]; | |
1399 | ||
1400 | if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) | |
1401 | continue; | |
1402 | ||
1403 | if (res->flags & skip_mask) | |
1404 | continue; | |
1405 | ||
1406 | bus_addr = phys_addr - res->phys_start + res->bus_start; | |
1407 | ||
1408 | if (bus_addr >= res->bus_start && | |
1409 | (bus_addr - res->bus_start) < res->size) { | |
1410 | *ba = bus_addr; | |
1411 | return 0; | |
1412 | } | |
1413 | } | |
1414 | ||
1415 | return 1; | |
1416 | } | |
1417 | ||
1418 | pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr, | |
1419 | unsigned long flags) | |
1420 | { | |
1421 | pci_addr_t bus_addr = 0; | |
1422 | int ret; | |
1423 | ||
1424 | /* | |
1425 | * if PCI_REGION_MEM is set we do a two pass search with preference | |
1426 | * on matches that don't have PCI_REGION_SYS_MEMORY set | |
1427 | */ | |
1428 | if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { | |
1429 | ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, | |
1430 | PCI_REGION_SYS_MEMORY, &bus_addr); | |
1431 | if (!ret) | |
1432 | return bus_addr; | |
1433 | } | |
1434 | ||
1435 | ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr); | |
1436 | ||
1437 | if (ret) | |
1438 | puts("pci_hose_phys_to_bus: invalid physical address\n"); | |
1439 | ||
1440 | return bus_addr; | |
1441 | } | |
1442 | ||
51eeae91 | 1443 | static phys_addr_t dm_pci_map_ea_virt(struct udevice *dev, int ea_off, |
8a8d24bd | 1444 | struct pci_child_plat *pdata) |
51eeae91 SG |
1445 | { |
1446 | phys_addr_t addr = 0; | |
1447 | ||
1448 | /* | |
1449 | * In the case of a Virtual Function device using BAR | |
1450 | * base and size, add offset for VFn BAR(1, 2, 3...n) | |
1451 | */ | |
1452 | if (pdata->is_virtfn) { | |
1453 | size_t sz; | |
1454 | u32 ea_entry; | |
1455 | ||
1456 | /* MaxOffset, 1st DW */ | |
1457 | dm_pci_read_config32(dev, ea_off + 8, &ea_entry); | |
1458 | sz = ea_entry & PCI_EA_FIELD_MASK; | |
1459 | /* Fill up lower 2 bits */ | |
1460 | sz |= (~PCI_EA_FIELD_MASK); | |
1461 | ||
1462 | if (ea_entry & PCI_EA_IS_64) { | |
1463 | /* MaxOffset 2nd DW */ | |
1464 | dm_pci_read_config32(dev, ea_off + 16, &ea_entry); | |
1465 | sz |= ((u64)ea_entry) << 32; | |
1466 | } | |
1467 | ||
1468 | addr = (pdata->virtid - 1) * (sz + 1); | |
1469 | } | |
1470 | ||
1471 | return addr; | |
1472 | } | |
1473 | ||
0b143d8a | 1474 | static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, int flags, |
8a8d24bd | 1475 | int ea_off, struct pci_child_plat *pdata) |
0b143d8a AM |
1476 | { |
1477 | int ea_cnt, i, entry_size; | |
1478 | int bar_id = (bar - PCI_BASE_ADDRESS_0) >> 2; | |
1479 | u32 ea_entry; | |
1480 | phys_addr_t addr; | |
1481 | ||
51eeae91 SG |
1482 | if (IS_ENABLED(CONFIG_PCI_SRIOV)) { |
1483 | /* | |
1484 | * In the case of a Virtual Function device, @dev here is the |
1485 | * Physical Function, so pdata will point to the required |
1486 | * VF-specific data. |
1487 | */ | |
1488 | if (pdata->is_virtfn) | |
1489 | bar_id += PCI_EA_BEI_VF_BAR0; | |
1490 | } | |
1491 | ||
0b143d8a AM |
1492 | /* EA capability structure header */ |
1493 | dm_pci_read_config32(dev, ea_off, &ea_entry); | |
1494 | ea_cnt = (ea_entry >> 16) & PCI_EA_NUM_ENT_MASK; | |
1495 | ea_off += PCI_EA_FIRST_ENT; | |
1496 | ||
1497 | for (i = 0; i < ea_cnt; i++, ea_off += entry_size) { | |
1498 | /* Entry header */ | |
1499 | dm_pci_read_config32(dev, ea_off, &ea_entry); | |
1500 | entry_size = ((ea_entry & PCI_EA_ES) + 1) << 2; | |
1501 | ||
1502 | if (((ea_entry & PCI_EA_BEI) >> 4) != bar_id) | |
1503 | continue; | |
1504 | ||
1505 | /* Base address, 1st DW */ | |
1506 | dm_pci_read_config32(dev, ea_off + 4, &ea_entry); | |
1507 | addr = ea_entry & PCI_EA_FIELD_MASK; | |
1508 | if (ea_entry & PCI_EA_IS_64) { | |
1509 | /* Base address, 2nd DW, skip over 4B MaxOffset */ | |
1510 | dm_pci_read_config32(dev, ea_off + 12, &ea_entry); | |
1511 | addr |= ((u64)ea_entry) << 32; | |
1512 | } | |
1513 | ||
51eeae91 SG |
1514 | if (IS_ENABLED(CONFIG_PCI_SRIOV)) |
1515 | addr += dm_pci_map_ea_virt(dev, ea_off, pdata); | |
1516 | ||
0b143d8a | 1517 | /* size ignored for now */ |
b3699a13 | 1518 | return map_physmem(addr, 0, flags); |
0b143d8a AM |
1519 | } |
1520 | ||
1521 | return 0; | |
1522 | } | |
1523 | ||
21d1fe7e SG |
1524 | void *dm_pci_map_bar(struct udevice *dev, int bar, int flags) |
1525 | { | |
8a8d24bd | 1526 | struct pci_child_plat *pdata = dev_get_parent_plat(dev); |
51eeae91 | 1527 | struct udevice *udev = dev; |
21d1fe7e SG |
1528 | pci_addr_t pci_bus_addr; |
1529 | u32 bar_response; | |
0b143d8a AM |
1530 | int ea_off; |
1531 | ||
51eeae91 SG |
1532 | if (IS_ENABLED(CONFIG_PCI_SRIOV)) { |
1533 | /* | |
1534 | * In case of Virtual Function devices, use PF udevice | |
1535 | * as EA capability is defined in Physical Function | |
1536 | */ | |
1537 | if (pdata->is_virtfn) | |
1538 | udev = pdata->pfdev; | |
1539 | } | |
1540 | ||
0b143d8a AM |
1541 | /* |
1542 | * if the function supports Enhanced Allocation use that instead of | |
1543 | * BARs | |
51eeae91 SG |
1544 | * In case of virtual functions, pdata will help read VF BEI |
1545 | * and EA entry size. | |
0b143d8a | 1546 | */ |
51eeae91 | 1547 | ea_off = dm_pci_find_capability(udev, PCI_CAP_ID_EA); |
0b143d8a | 1548 | if (ea_off) |
51eeae91 | 1549 | return dm_pci_map_ea_bar(udev, bar, flags, ea_off, pdata); |
21d1fe7e SG |
1550 | |
1551 | /* read BAR address */ | |
51eeae91 | 1552 | dm_pci_read_config32(udev, bar, &bar_response); |
21d1fe7e SG |
1553 | pci_bus_addr = (pci_addr_t)(bar_response & ~0xf); |
1554 | ||
1555 | /* | |
1556 | * Pass "0" as the length argument to pci_bus_to_virt. The arg | |
b3699a13 | 1557 | * isn't actually used on any platform because U-Boot assumes a static |
21d1fe7e SG |
1558 | * linear mapping. In the future, this could read the BAR size |
1559 | * and pass that as the size if needed. | |
1560 | */ | |
51eeae91 | 1561 | return dm_pci_bus_to_virt(udev, pci_bus_addr, flags, 0, MAP_NOCACHE); |
21d1fe7e SG |
1562 | } |
1563 | ||
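For completeness, a typical caller of dm_pci_map_bar() maps BAR0 during probe and defensively checks the result. A minimal, hypothetical probe fragment (the `demo_` name is invented):

```c
#include <dm.h>
#include <errno.h>
#include <pci.h>

static int demo_probe(struct udevice *dev)
{
	void *regs;

	regs = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
	if (!regs)
		return -EINVAL;

	/* ... access the device's memory-mapped registers through regs ... */

	return 0;
}
```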
a8c5f8d3 | 1564 | static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap) |
dac01fd8 | 1565 | { |
dac01fd8 BM |
1566 | int ttl = PCI_FIND_CAP_TTL; |
1567 | u8 id; | |
1568 | u16 ent; | |
dac01fd8 BM |
1569 | |
1570 | dm_pci_read_config8(dev, pos, &pos); | |
a8c5f8d3 | 1571 | |
dac01fd8 BM |
1572 | while (ttl--) { |
1573 | if (pos < PCI_STD_HEADER_SIZEOF) | |
1574 | break; | |
1575 | pos &= ~3; | |
1576 | dm_pci_read_config16(dev, pos, &ent); | |
1577 | ||
1578 | id = ent & 0xff; | |
1579 | if (id == 0xff) | |
1580 | break; | |
1581 | if (id == cap) | |
1582 | return pos; | |
1583 | pos = (ent >> 8); | |
1584 | } | |
1585 | ||
1586 | return 0; | |
1587 | } | |
1588 | ||
a8c5f8d3 BM |
1589 | int dm_pci_find_next_capability(struct udevice *dev, u8 start, int cap) |
1590 | { | |
1591 | return _dm_pci_find_next_capability(dev, start + PCI_CAP_LIST_NEXT, | |
1592 | cap); | |
1593 | } | |
1594 | ||
1595 | int dm_pci_find_capability(struct udevice *dev, int cap) | |
1596 | { | |
1597 | u16 status; | |
1598 | u8 header_type; | |
1599 | u8 pos; | |
1600 | ||
1601 | dm_pci_read_config16(dev, PCI_STATUS, &status); | |
1602 | if (!(status & PCI_STATUS_CAP_LIST)) | |
1603 | return 0; | |
1604 | ||
1605 | dm_pci_read_config8(dev, PCI_HEADER_TYPE, &header_type); | |
1606 | if ((header_type & 0x7f) == PCI_HEADER_TYPE_CARDBUS) | |
1607 | pos = PCI_CB_CAPABILITY_LIST; | |
1608 | else | |
1609 | pos = PCI_CAPABILITY_LIST; | |
1610 | ||
1611 | return _dm_pci_find_next_capability(dev, pos, cap); | |
1612 | } | |
1613 | ||
1614 | int dm_pci_find_next_ext_capability(struct udevice *dev, int start, int cap) | |
dac01fd8 BM |
1615 | { |
1616 | u32 header; | |
1617 | int ttl; | |
1618 | int pos = PCI_CFG_SPACE_SIZE; | |
1619 | ||
1620 | /* minimum 8 bytes per capability */ | |
1621 | ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; | |
1622 | ||
a8c5f8d3 BM |
1623 | if (start) |
1624 | pos = start; | |
1625 | ||
dac01fd8 BM |
1626 | dm_pci_read_config32(dev, pos, &header); |
1627 | /* | |
1628 | * If we have no capabilities, this is indicated by cap ID, | |
1629 | * cap version and next pointer all being 0. | |
1630 | */ | |
1631 | if (header == 0) | |
1632 | return 0; | |
1633 | ||
1634 | while (ttl--) { | |
1635 | if (PCI_EXT_CAP_ID(header) == cap) | |
1636 | return pos; | |
1637 | ||
1638 | pos = PCI_EXT_CAP_NEXT(header); | |
1639 | if (pos < PCI_CFG_SPACE_SIZE) | |
1640 | break; | |
1641 | ||
1642 | dm_pci_read_config32(dev, pos, &header); | |
1643 | } | |
1644 | ||
1645 | return 0; | |
1646 | } | |
1647 | ||
a8c5f8d3 BM |
1648 | int dm_pci_find_ext_capability(struct udevice *dev, int cap) |
1649 | { | |
1650 | return dm_pci_find_next_ext_capability(dev, 0, cap); | |
1651 | } | |
1652 | ||
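The capability walkers above are normally used by drivers to locate optional features before touching them. A small, hypothetical sketch that merely reports where the MSI and ARI capabilities sit (MSI is a standard capability ID; ARI is already used further down in this file):

```c
#include <dm.h>
#include <log.h>
#include <pci.h>

static void demo_report_caps(struct udevice *dev)
{
	int msi = dm_pci_find_capability(dev, PCI_CAP_ID_MSI);
	int ari = dm_pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);

	/* Both helpers return 0 when the capability is absent */
	debug("%s: MSI cap at %#x, ARI ext cap at %#x\n", dev->name, msi, ari);
}
```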
b8e1f827 AM |
1653 | int dm_pci_flr(struct udevice *dev) |
1654 | { | |
1655 | int pcie_off; | |
1656 | u32 cap; | |
1657 | ||
1658 | /* look for PCI Express Capability */ | |
1659 | pcie_off = dm_pci_find_capability(dev, PCI_CAP_ID_EXP); | |
1660 | if (!pcie_off) | |
1661 | return -ENOENT; | |
1662 | ||
1663 | /* check FLR capability */ | |
1664 | dm_pci_read_config32(dev, pcie_off + PCI_EXP_DEVCAP, &cap); | |
1665 | if (!(cap & PCI_EXP_DEVCAP_FLR)) | |
1666 | return -ENOENT; | |
1667 | ||
1668 | dm_pci_clrset_config16(dev, pcie_off + PCI_EXP_DEVCTL, 0, | |
1669 | PCI_EXP_DEVCTL_BCR_FLR); | |
1670 | ||
1671 | /* wait 100ms, per PCI spec */ | |
1672 | mdelay(100); | |
1673 | ||
1674 | return 0; | |
1675 | } | |
1676 | ||
b8852dcf SG |
1677 | #if defined(CONFIG_PCI_SRIOV) |
1678 | int pci_sriov_init(struct udevice *pdev, int vf_en) | |
1679 | { | |
1680 | u16 vendor, device; | |
1681 | struct udevice *bus; | |
1682 | struct udevice *dev; | |
1683 | pci_dev_t bdf; | |
1684 | u16 ctrl; | |
1685 | u16 num_vfs; | |
1686 | u16 total_vf; | |
1687 | u16 vf_offset; | |
1688 | u16 vf_stride; | |
1689 | int vf, ret; | |
1690 | int pos; | |
1691 | ||
1692 | pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); | |
1693 | if (!pos) { | |
1694 | debug("Error: SRIOV capability not found\n"); | |
1695 | return -ENOENT; | |
1696 | } | |
1697 | ||
1698 | dm_pci_read_config16(pdev, pos + PCI_SRIOV_CTRL, &ctrl); | |
1699 | ||
1700 | dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf); | |
1701 | if (vf_en > total_vf) | |
1702 | vf_en = total_vf; | |
1703 | dm_pci_write_config16(pdev, pos + PCI_SRIOV_NUM_VF, vf_en); | |
1704 | ||
1705 | ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; | |
1706 | dm_pci_write_config16(pdev, pos + PCI_SRIOV_CTRL, ctrl); | |
1707 | ||
1708 | dm_pci_read_config16(pdev, pos + PCI_SRIOV_NUM_VF, &num_vfs); | |
1709 | if (num_vfs > vf_en) | |
1710 | num_vfs = vf_en; | |
1711 | ||
1712 | dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_OFFSET, &vf_offset); | |
1713 | dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_STRIDE, &vf_stride); | |
1714 | ||
1715 | dm_pci_read_config16(pdev, PCI_VENDOR_ID, &vendor); | |
1716 | dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_DID, &device); | |
1717 | ||
1718 | bdf = dm_pci_get_bdf(pdev); | |
1719 | ||
1720 | pci_get_bus(PCI_BUS(bdf), &bus); | |
1721 | ||
1722 | if (!bus) | |
1723 | return -ENODEV; | |
1724 | ||
1725 | bdf += PCI_BDF(0, 0, vf_offset); | |
1726 | ||
1727 | for (vf = 0; vf < num_vfs; vf++) { | |
8a8d24bd | 1728 | struct pci_child_plat *pplat; |
b8852dcf SG |
1729 | ulong class; |
1730 | ||
1731 | pci_bus_read_config(bus, bdf, PCI_CLASS_DEVICE, | |
1732 | &class, PCI_SIZE_16); | |
1733 | ||
1734 | debug("%s: bus %d/%s: found VF %x:%x\n", __func__, | |
1735 | bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf)); | |
1736 | ||
1737 | /* Find this device in the device tree */ | |
1738 | ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev); | |
1739 | ||
1740 | if (ret == -ENODEV) { | |
1741 | struct pci_device_id find_id; | |
1742 | ||
1743 | memset(&find_id, '\0', sizeof(find_id)); | |
1744 | find_id.vendor = vendor; | |
1745 | find_id.device = device; | |
1746 | find_id.class = class; | |
1747 | ||
1748 | ret = pci_find_and_bind_driver(bus, &find_id, | |
1749 | bdf, &dev); | |
1750 | ||
1751 | if (ret) | |
1752 | return ret; | |
1753 | } | |
1754 | ||
1755 | /* Update the platform data */ | |
caa4daa2 | 1756 | pplat = dev_get_parent_plat(dev); |
b8852dcf SG |
1757 | pplat->devfn = PCI_MASK_BUS(bdf); |
1758 | pplat->vendor = vendor; | |
1759 | pplat->device = device; | |
1760 | pplat->class = class; | |
1761 | pplat->is_virtfn = true; | |
1762 | pplat->pfdev = pdev; | |
1763 | pplat->virtid = vf * vf_stride + vf_offset; | |
1764 | ||
1765 | debug("%s: bus %d/%s: found VF %x:%x %x:%x class %lx id %x\n", | |
1766 | __func__, dev->seq, dev->name, PCI_DEV(bdf), | |
1767 | PCI_FUNC(bdf), vendor, device, class, pplat->virtid); | |
1768 | bdf += PCI_BDF(0, 0, vf_stride); | |
1769 | } | |
1770 | ||
1771 | return 0; | |
1772 | } | |
1773 | ||
1774 | int pci_sriov_get_totalvfs(struct udevice *pdev) | |
1775 | { | |
1776 | u16 total_vf; | |
1777 | int pos; | |
1778 | ||
1779 | pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); | |
1780 | if (!pos) { | |
1781 | debug("Error: SRIOV capability not found\n"); | |
1782 | return -ENOENT; | |
1783 | } | |
1784 | ||
1785 | dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf); | |
1786 | ||
1787 | return total_vf; | |
1788 | } | |
1789 | #endif /* SRIOV */ | |
1790 | ||
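When CONFIG_PCI_SRIOV is enabled, a PF driver would typically combine the two functions above roughly as follows; the wrapper is a hypothetical sketch, not an API from this file:

```c
#include <dm.h>
#include <errno.h>
#include <pci.h>

/* Enable up to 'want' VFs, clamped by what the PF advertises */
static int demo_enable_vfs(struct udevice *pdev, int want)
{
	int total = pci_sriov_get_totalvfs(pdev);

	if (total < 0)
		return total;		/* no SR-IOV capability */
	if (want > total)
		want = total;

	return pci_sriov_init(pdev, want);
}
```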
ff3e077b SG |
1791 | UCLASS_DRIVER(pci) = { |
1792 | .id = UCLASS_PCI, | |
1793 | .name = "pci", | |
2bb02e4f | 1794 | .flags = DM_UC_FLAG_SEQ_ALIAS, |
91195485 | 1795 | .post_bind = dm_scan_fdt_dev, |
ff3e077b SG |
1796 | .pre_probe = pci_uclass_pre_probe, |
1797 | .post_probe = pci_uclass_post_probe, | |
1798 | .child_post_bind = pci_uclass_child_post_bind, | |
41575d8e | 1799 | .per_device_auto = sizeof(struct pci_controller), |
8a8d24bd | 1800 | .per_child_plat_auto = sizeof(struct pci_child_plat), |
ff3e077b SG |
1801 | }; |
1802 | ||
1803 | static const struct dm_pci_ops pci_bridge_ops = { | |
1804 | .read_config = pci_bridge_read_config, | |
1805 | .write_config = pci_bridge_write_config, | |
1806 | }; | |
1807 | ||
1808 | static const struct udevice_id pci_bridge_ids[] = { | |
1809 | { .compatible = "pci-bridge" }, | |
1810 | { } | |
1811 | }; | |
1812 | ||
1813 | U_BOOT_DRIVER(pci_bridge_drv) = { | |
1814 | .name = "pci_bridge_drv", | |
1815 | .id = UCLASS_PCI, | |
1816 | .of_match = pci_bridge_ids, | |
1817 | .ops = &pci_bridge_ops, | |
1818 | }; | |
1819 | ||
1820 | UCLASS_DRIVER(pci_generic) = { | |
1821 | .id = UCLASS_PCI_GENERIC, | |
1822 | .name = "pci_generic", | |
1823 | }; | |
1824 | ||
1825 | static const struct udevice_id pci_generic_ids[] = { | |
1826 | { .compatible = "pci-generic" }, | |
1827 | { } | |
1828 | }; | |
1829 | ||
1830 | U_BOOT_DRIVER(pci_generic_drv) = { | |
1831 | .name = "pci_generic_drv", | |
1832 | .id = UCLASS_PCI_GENERIC, | |
1833 | .of_match = pci_generic_ids, | |
1834 | }; | |
e578b92c SW |
1835 | |
1836 | void pci_init(void) | |
1837 | { | |
1838 | struct udevice *bus; | |
1839 | ||
1840 | /* | |
1841 | * Enumerate all known controller devices. Enumeration has the side- | |
1842 | * effect of probing them, so PCIe devices will be enumerated too. | |
1843 | */ | |
60ee6094 | 1844 | for (uclass_first_device_check(UCLASS_PCI, &bus); |
e578b92c | 1845 | bus; |
60ee6094 | 1846 | uclass_next_device_check(&bus)) { |
e578b92c SW |
1847 | ; |
1848 | } | |
1849 | } |