Commit | Line | Data |
---|---|---|
83d290c5 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
ff3e077b SG |
2 | /* |
3 | * Copyright (c) 2014 Google, Inc | |
4 | * Written by Simon Glass <[email protected]> | |
ff3e077b SG |
5 | */ |
6 | ||
7 | #include <common.h> | |
8 | #include <dm.h> | |
9 | #include <errno.h> | |
ff3e077b | 10 | #include <pci.h> |
21d1fe7e | 11 | #include <asm/io.h> |
ff3e077b | 12 | #include <dm/device-internal.h> |
bf501595 | 13 | #include <dm/lists.h> |
348b744b | 14 | #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP) |
07f2f58b | 15 | #include <asm/fsp/fsp_support.h> |
348b744b | 16 | #endif |
5e23b8b4 | 17 | #include "pci_internal.h" |
ff3e077b SG |
18 | |
19 | DECLARE_GLOBAL_DATA_PTR; | |
20 | ||
a6eb93b3 | 21 | int pci_get_bus(int busnum, struct udevice **busp) |
983c6ba2 SG |
22 | { |
23 | int ret; | |
24 | ||
25 | ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); | |
26 | ||
27 | /* Since buses may not be numbered yet, try a little harder with bus 0 */ | |
28 | if (ret == -ENODEV) { | |
3f603cbb | 29 | ret = uclass_first_device_err(UCLASS_PCI, busp); |
983c6ba2 SG |
30 | if (ret) |
31 | return ret; | |
983c6ba2 SG |
32 | ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); |
33 | } | |
34 | ||
35 | return ret; | |
36 | } | |
37 | ||
9f60fb0d SG |
38 | struct udevice *pci_get_controller(struct udevice *dev) |
39 | { | |
40 | while (device_is_on_pci_bus(dev)) | |
41 | dev = dev->parent; | |
42 | ||
43 | return dev; | |
44 | } | |
45 | ||
21ccce1b | 46 | pci_dev_t dm_pci_get_bdf(struct udevice *dev) |
4b515e4f SG |
47 | { |
48 | struct pci_child_platdata *pplat = dev_get_parent_platdata(dev); | |
49 | struct udevice *bus = dev->parent; | |
50 | ||
51 | return PCI_ADD_BUS(bus->seq, pplat->devfn); | |
52 | } | |
53 | ||
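Illustrative sketch only (not part of this file): a device driver can use dm_pci_get_bdf() together with the PCI_BUS()/PCI_DEV()/PCI_FUNC() macros to recover its own bus/device/function numbers. The function name my_pci_probe is a made-up placeholder.

static int my_pci_probe(struct udevice *dev)
{
	pci_dev_t bdf = dm_pci_get_bdf(dev);

	/* Decode the packed bus/device/function address */
	printf("probing PCI device at %02x:%02x.%x\n",
	       PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));

	return 0;
}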
ff3e077b SG |
54 | /** |
55 | * pci_get_bus_max() - returns the bus number of the last active bus | |
56 | * | |
57 | * @return last bus number, or -1 if no active buses | |
58 | */ | |
59 | static int pci_get_bus_max(void) | |
60 | { | |
61 | struct udevice *bus; | |
62 | struct uclass *uc; | |
63 | int ret = -1; | |
64 | ||
65 | ret = uclass_get(UCLASS_PCI, &uc); | |
66 | uclass_foreach_dev(bus, uc) { | |
67 | if (bus->seq > ret) | |
68 | ret = bus->seq; | |
69 | } | |
70 | ||
71 | debug("%s: ret=%d\n", __func__, ret); | |
72 | ||
73 | return ret; | |
74 | } | |
75 | ||
76 | int pci_last_busno(void) | |
77 | { | |
069155cb | 78 | return pci_get_bus_max(); |
ff3e077b SG |
79 | } |
80 | ||
81 | int pci_get_ff(enum pci_size_t size) | |
82 | { | |
83 | switch (size) { | |
84 | case PCI_SIZE_8: | |
85 | return 0xff; | |
86 | case PCI_SIZE_16: | |
87 | return 0xffff; | |
88 | default: | |
89 | return 0xffffffff; | |
90 | } | |
91 | } | |
92 | ||
02e4d38d MV |
93 | static void pci_dev_find_ofnode(struct udevice *bus, phys_addr_t bdf, |
94 | ofnode *rnode) | |
95 | { | |
96 | struct fdt_pci_addr addr; | |
97 | ofnode node; | |
98 | int ret; | |
99 | ||
100 | dev_for_each_subnode(node, bus) { | |
101 | ret = ofnode_read_pci_addr(node, FDT_PCI_SPACE_CONFIG, "reg", | |
102 | &addr); | |
103 | if (ret) | |
104 | continue; | |
105 | ||
106 | if (PCI_MASK_BUS(addr.phys_hi) != PCI_MASK_BUS(bdf)) | |
107 | continue; | |
108 | ||
109 | *rnode = node; | |
110 | break; | |
111 | } | |
112 | } |
113 | ||
ff3e077b SG |
114 | int pci_bus_find_devfn(struct udevice *bus, pci_dev_t find_devfn, |
115 | struct udevice **devp) | |
116 | { | |
117 | struct udevice *dev; | |
118 | ||
119 | for (device_find_first_child(bus, &dev); | |
120 | dev; | |
121 | device_find_next_child(&dev)) { | |
122 | struct pci_child_platdata *pplat; | |
123 | ||
124 | pplat = dev_get_parent_platdata(dev); | |
125 | if (pplat && pplat->devfn == find_devfn) { | |
126 | *devp = dev; | |
127 | return 0; | |
128 | } | |
129 | } | |
130 | ||
131 | return -ENODEV; | |
132 | } | |
133 | ||
f3f1faef | 134 | int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp) |
ff3e077b SG |
135 | { |
136 | struct udevice *bus; | |
137 | int ret; | |
138 | ||
983c6ba2 | 139 | ret = pci_get_bus(PCI_BUS(bdf), &bus); |
ff3e077b SG |
140 | if (ret) |
141 | return ret; | |
142 | return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp); | |
143 | } | |
144 | ||
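As an illustration (assumed usage, not code from this file), a caller that still holds a legacy bus/device/function triple can translate it into a driver-model device with this helper, for example inside some setup function:

	struct udevice *dev;
	int ret;

	/* Look up function 3 of device 0x1f on bus 0 */
	ret = dm_pci_bus_find_bdf(PCI_BDF(0, 0x1f, 3), &dev);
	if (ret)
		return ret;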
145 | static int pci_device_matches_ids(struct udevice *dev, | |
146 | struct pci_device_id *ids) | |
147 | { | |
148 | struct pci_child_platdata *pplat; | |
149 | int i; | |
150 | ||
151 | pplat = dev_get_parent_platdata(dev); | |
152 | if (!pplat) | |
153 | return -EINVAL; | |
154 | for (i = 0; ids[i].vendor != 0; i++) { | |
155 | if (pplat->vendor == ids[i].vendor && | |
156 | pplat->device == ids[i].device) | |
157 | return i; | |
158 | } | |
159 | ||
160 | return -EINVAL; | |
161 | } | |
162 | ||
163 | int pci_bus_find_devices(struct udevice *bus, struct pci_device_id *ids, | |
164 | int *indexp, struct udevice **devp) | |
165 | { | |
166 | struct udevice *dev; | |
167 | ||
168 | /* Scan all devices on this bus */ | |
169 | for (device_find_first_child(bus, &dev); | |
170 | dev; | |
171 | device_find_next_child(&dev)) { | |
172 | if (pci_device_matches_ids(dev, ids) >= 0) { | |
173 | if ((*indexp)-- <= 0) { | |
174 | *devp = dev; | |
175 | return 0; | |
176 | } | |
177 | } | |
178 | } | |
179 | ||
180 | return -ENODEV; | |
181 | } | |
182 | ||
183 | int pci_find_device_id(struct pci_device_id *ids, int index, | |
184 | struct udevice **devp) | |
185 | { | |
186 | struct udevice *bus; | |
187 | ||
188 | /* Scan all known buses */ | |
189 | for (uclass_first_device(UCLASS_PCI, &bus); | |
190 | bus; | |
191 | uclass_next_device(&bus)) { | |
192 | if (!pci_bus_find_devices(bus, ids, &index, devp)) | |
193 | return 0; | |
194 | } | |
195 | *devp = NULL; | |
196 | ||
197 | return -ENODEV; | |
198 | } | |
199 | ||
5c0bf647 SG |
200 | static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor, |
201 | unsigned int device, int *indexp, | |
202 | struct udevice **devp) | |
203 | { | |
204 | struct pci_child_platdata *pplat; | |
205 | struct udevice *dev; | |
206 | ||
207 | for (device_find_first_child(bus, &dev); | |
208 | dev; | |
209 | device_find_next_child(&dev)) { | |
210 | pplat = dev_get_parent_platdata(dev); | |
211 | if (pplat->vendor == vendor && pplat->device == device) { | |
212 | if (!(*indexp)--) { | |
213 | *devp = dev; | |
214 | return 0; | |
215 | } | |
216 | } | |
217 | } | |
218 | ||
219 | return -ENODEV; | |
220 | } | |
221 | ||
222 | int dm_pci_find_device(unsigned int vendor, unsigned int device, int index, | |
223 | struct udevice **devp) | |
224 | { | |
225 | struct udevice *bus; | |
226 | ||
227 | /* Scan all known buses */ | |
228 | for (uclass_first_device(UCLASS_PCI, &bus); | |
229 | bus; | |
230 | uclass_next_device(&bus)) { | |
231 | if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp)) | |
232 | return device_probe(*devp); | |
233 | } | |
234 | *devp = NULL; | |
235 | ||
236 | return -ENODEV; | |
237 | } | |
238 | ||
a0eb8356 SG |
239 | int dm_pci_find_class(uint find_class, int index, struct udevice **devp) |
240 | { | |
241 | struct udevice *dev; | |
242 | ||
243 | /* Scan all known buses */ | |
244 | for (pci_find_first_device(&dev); | |
245 | dev; | |
246 | pci_find_next_device(&dev)) { | |
247 | struct pci_child_platdata *pplat = dev_get_parent_platdata(dev); | |
248 | ||
249 | if (pplat->class == find_class && !index--) { | |
250 | *devp = dev; | |
251 | return device_probe(*devp); | |
252 | } | |
253 | } | |
254 | *devp = NULL; | |
255 | ||
256 | return -ENODEV; | |
257 | } | |
258 | ||
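A hedged usage sketch for the two lookup helpers above; the vendor/device numbers are placeholders, not values taken from this file:

	struct udevice *dev;

	/* Find (and probe) the first device with a given vendor/device ID */
	if (!dm_pci_find_device(0x1234, 0x5678, 0, &dev))
		printf("found %s\n", dev->name);

	/* Find the first VGA display controller (24-bit class code) */
	if (!dm_pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, 0, &dev))
		printf("VGA controller is %s\n", dev->name);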
ff3e077b SG |
259 | int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset, |
260 | unsigned long value, enum pci_size_t size) | |
261 | { | |
262 | struct dm_pci_ops *ops; | |
263 | ||
264 | ops = pci_get_ops(bus); | |
265 | if (!ops->write_config) | |
266 | return -ENOSYS; | |
267 | return ops->write_config(bus, bdf, offset, value, size); | |
268 | } | |
269 | ||
319dba1f SG |
270 | int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset, |
271 | u32 clr, u32 set) | |
272 | { | |
273 | ulong val; | |
274 | int ret; | |
275 | ||
276 | ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32); | |
277 | if (ret) | |
278 | return ret; | |
279 | val &= ~clr; | |
280 | val |= set; | |
281 | ||
282 | return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32); | |
283 | } | |
284 | ||
ff3e077b SG |
285 | int pci_write_config(pci_dev_t bdf, int offset, unsigned long value, |
286 | enum pci_size_t size) | |
287 | { | |
288 | struct udevice *bus; | |
289 | int ret; | |
290 | ||
983c6ba2 | 291 | ret = pci_get_bus(PCI_BUS(bdf), &bus); |
ff3e077b SG |
292 | if (ret) |
293 | return ret; | |
294 | ||
4d8615cb | 295 | return pci_bus_write_config(bus, bdf, offset, value, size); |
ff3e077b SG |
296 | } |
297 | ||
66afb4ed SG |
298 | int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value, |
299 | enum pci_size_t size) | |
300 | { | |
301 | struct udevice *bus; | |
302 | ||
1e0f2263 | 303 | for (bus = dev; device_is_on_pci_bus(bus);) |
66afb4ed | 304 | bus = bus->parent; |
21ccce1b SG |
305 | return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value, |
306 | size); | |
66afb4ed SG |
307 | } |
308 | ||
ff3e077b SG |
309 | int pci_write_config32(pci_dev_t bdf, int offset, u32 value) |
310 | { | |
311 | return pci_write_config(bdf, offset, value, PCI_SIZE_32); | |
312 | } | |
313 | ||
314 | int pci_write_config16(pci_dev_t bdf, int offset, u16 value) | |
315 | { | |
316 | return pci_write_config(bdf, offset, value, PCI_SIZE_16); | |
317 | } | |
318 | ||
319 | int pci_write_config8(pci_dev_t bdf, int offset, u8 value) | |
320 | { | |
321 | return pci_write_config(bdf, offset, value, PCI_SIZE_8); | |
322 | } | |
323 | ||
66afb4ed SG |
324 | int dm_pci_write_config8(struct udevice *dev, int offset, u8 value) |
325 | { | |
326 | return dm_pci_write_config(dev, offset, value, PCI_SIZE_8); | |
327 | } | |
328 | ||
329 | int dm_pci_write_config16(struct udevice *dev, int offset, u16 value) | |
330 | { | |
331 | return dm_pci_write_config(dev, offset, value, PCI_SIZE_16); | |
332 | } | |
333 | ||
334 | int dm_pci_write_config32(struct udevice *dev, int offset, u32 value) | |
335 | { | |
336 | return dm_pci_write_config(dev, offset, value, PCI_SIZE_32); | |
337 | } | |
338 | ||
ff3e077b SG |
339 | int pci_bus_read_config(struct udevice *bus, pci_dev_t bdf, int offset, |
340 | unsigned long *valuep, enum pci_size_t size) | |
341 | { | |
342 | struct dm_pci_ops *ops; | |
343 | ||
344 | ops = pci_get_ops(bus); | |
345 | if (!ops->read_config) | |
346 | return -ENOSYS; | |
347 | return ops->read_config(bus, bdf, offset, valuep, size); | |
348 | } | |
349 | ||
350 | int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep, | |
351 | enum pci_size_t size) | |
352 | { | |
353 | struct udevice *bus; | |
354 | int ret; | |
355 | ||
983c6ba2 | 356 | ret = pci_get_bus(PCI_BUS(bdf), &bus); |
ff3e077b SG |
357 | if (ret) |
358 | return ret; | |
359 | ||
4d8615cb | 360 | return pci_bus_read_config(bus, bdf, offset, valuep, size); |
ff3e077b SG |
361 | } |
362 | ||
66afb4ed SG |
363 | int dm_pci_read_config(struct udevice *dev, int offset, unsigned long *valuep, |
364 | enum pci_size_t size) | |
365 | { | |
366 | struct udevice *bus; | |
367 | ||
1e0f2263 | 368 | for (bus = dev; device_is_on_pci_bus(bus);) |
66afb4ed | 369 | bus = bus->parent; |
21ccce1b | 370 | return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep, |
66afb4ed SG |
371 | size); |
372 | } | |
373 | ||
ff3e077b SG |
374 | int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep) |
375 | { | |
376 | unsigned long value; | |
377 | int ret; | |
378 | ||
379 | ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32); | |
380 | if (ret) | |
381 | return ret; | |
382 | *valuep = value; | |
383 | ||
384 | return 0; | |
385 | } | |
386 | ||
387 | int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep) | |
388 | { | |
389 | unsigned long value; | |
390 | int ret; | |
391 | ||
392 | ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16); | |
393 | if (ret) | |
394 | return ret; | |
395 | *valuep = value; | |
396 | ||
397 | return 0; | |
398 | } | |
399 | ||
400 | int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep) | |
401 | { | |
402 | unsigned long value; | |
403 | int ret; | |
404 | ||
405 | ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8); | |
406 | if (ret) | |
407 | return ret; | |
408 | *valuep = value; | |
409 | ||
410 | return 0; | |
411 | } | |
412 | ||
66afb4ed SG |
413 | int dm_pci_read_config8(struct udevice *dev, int offset, u8 *valuep) |
414 | { | |
415 | unsigned long value; | |
416 | int ret; | |
417 | ||
418 | ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8); | |
419 | if (ret) | |
420 | return ret; | |
421 | *valuep = value; | |
422 | ||
423 | return 0; | |
424 | } | |
425 | ||
426 | int dm_pci_read_config16(struct udevice *dev, int offset, u16 *valuep) | |
427 | { | |
428 | unsigned long value; | |
429 | int ret; | |
430 | ||
431 | ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16); | |
432 | if (ret) | |
433 | return ret; | |
434 | *valuep = value; | |
435 | ||
436 | return 0; | |
437 | } | |
438 | ||
439 | int dm_pci_read_config32(struct udevice *dev, int offset, u32 *valuep) | |
440 | { | |
441 | unsigned long value; | |
442 | int ret; | |
443 | ||
444 | ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32); | |
445 | if (ret) | |
446 | return ret; | |
447 | *valuep = value; | |
448 | ||
449 | return 0; | |
450 | } | |
451 | ||
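For illustration only (a sketch, not code from this driver): inside a device's own driver the sized accessors are typically used like this to read the identification registers from configuration space:

	u16 vendor, device;
	u32 class_rev;

	dm_pci_read_config16(dev, PCI_VENDOR_ID, &vendor);
	dm_pci_read_config16(dev, PCI_DEVICE_ID, &device);
	dm_pci_read_config32(dev, PCI_CLASS_REVISION, &class_rev);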
319dba1f SG |
452 | int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set) |
453 | { | |
454 | u8 val; | |
455 | int ret; | |
456 | ||
457 | ret = dm_pci_read_config8(dev, offset, &val); | |
458 | if (ret) | |
459 | return ret; | |
460 | val &= ~clr; | |
461 | val |= set; | |
462 | ||
463 | return dm_pci_write_config8(dev, offset, val); | |
464 | } | |
465 | ||
466 | int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set) | |
467 | { | |
468 | u16 val; | |
469 | int ret; | |
470 | ||
471 | ret = dm_pci_read_config16(dev, offset, &val); | |
472 | if (ret) | |
473 | return ret; | |
474 | val &= ~clr; | |
475 | val |= set; | |
476 | ||
477 | return dm_pci_write_config16(dev, offset, val); | |
478 | } | |
479 | ||
480 | int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set) | |
481 | { | |
482 | u32 val; | |
483 | int ret; | |
484 | ||
485 | ret = dm_pci_read_config32(dev, offset, &val); | |
486 | if (ret) | |
487 | return ret; | |
488 | val &= ~clr; | |
489 | val |= set; | |
490 | ||
491 | return dm_pci_write_config32(dev, offset, val); | |
492 | } | |
493 | ||
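A minimal usage sketch (an assumption, not taken from this file): since the clear/set helpers perform a read-modify-write, enabling memory decode and bus mastering for a device is a single call:

	/* Set PCI_COMMAND_MEMORY and PCI_COMMAND_MASTER, clear nothing */
	dm_pci_clrset_config16(dev, PCI_COMMAND, 0,
			       PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);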
bbbcb526 BM |
494 | static void set_vga_bridge_bits(struct udevice *dev) |
495 | { | |
496 | struct udevice *parent = dev->parent; | |
497 | u16 bc; | |
498 | ||
499 | while (parent->seq != 0) { | |
500 | dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc); | |
501 | bc |= PCI_BRIDGE_CTL_VGA; | |
502 | dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc); | |
503 | parent = parent->parent; | |
504 | } | |
505 | } | |
506 | ||
ff3e077b SG |
507 | int pci_auto_config_devices(struct udevice *bus) |
508 | { | |
509 | struct pci_controller *hose = bus->uclass_priv; | |
bbbcb526 | 510 | struct pci_child_platdata *pplat; |
ff3e077b SG |
511 | unsigned int sub_bus; |
512 | struct udevice *dev; | |
513 | int ret; | |
514 | ||
515 | sub_bus = bus->seq; | |
516 | debug("%s: start\n", __func__); | |
517 | pciauto_config_init(hose); | |
518 | for (ret = device_find_first_child(bus, &dev); | |
519 | !ret && dev; | |
520 | ret = device_find_next_child(&dev)) { | |
ff3e077b | 521 | unsigned int max_bus; |
4d21455e | 522 | int ret; |
ff3e077b | 523 | |
ff3e077b | 524 | debug("%s: device %s\n", __func__, dev->name); |
5e23b8b4 | 525 | ret = dm_pciauto_config_device(dev); |
4d21455e SG |
526 | if (ret < 0) |
527 | return ret; | |
528 | max_bus = ret; | |
ff3e077b | 529 | sub_bus = max(sub_bus, max_bus); |
bbbcb526 BM |
530 | |
531 | pplat = dev_get_parent_platdata(dev); | |
532 | if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8)) | |
533 | set_vga_bridge_bits(dev); | |
ff3e077b SG |
534 | } |
535 | debug("%s: done\n", __func__); | |
536 | ||
537 | return sub_bus; | |
538 | } | |
539 | ||
badb9922 TT |
540 | int pci_generic_mmap_write_config( |
541 | struct udevice *bus, | |
542 | int (*addr_f)(struct udevice *bus, pci_dev_t bdf, uint offset, void **addrp), | |
543 | pci_dev_t bdf, | |
544 | uint offset, | |
545 | ulong value, | |
546 | enum pci_size_t size) | |
547 | { | |
548 | void *address; | |
549 | ||
550 | if (addr_f(bus, bdf, offset, &address) < 0) | |
551 | return 0; | |
552 | ||
553 | switch (size) { | |
554 | case PCI_SIZE_8: | |
555 | writeb(value, address); | |
556 | return 0; | |
557 | case PCI_SIZE_16: | |
558 | writew(value, address); | |
559 | return 0; | |
560 | case PCI_SIZE_32: | |
561 | writel(value, address); | |
562 | return 0; | |
563 | default: | |
564 | return -EINVAL; | |
565 | } | |
566 | } | |
567 | ||
568 | int pci_generic_mmap_read_config( | |
569 | struct udevice *bus, | |
570 | int (*addr_f)(struct udevice *bus, pci_dev_t bdf, uint offset, void **addrp), | |
571 | pci_dev_t bdf, | |
572 | uint offset, | |
573 | ulong *valuep, | |
574 | enum pci_size_t size) | |
575 | { | |
576 | void *address; | |
577 | ||
578 | if (addr_f(bus, bdf, offset, &address) < 0) { | |
579 | *valuep = pci_get_ff(size); | |
580 | return 0; | |
581 | } | |
582 | ||
583 | switch (size) { | |
584 | case PCI_SIZE_8: | |
585 | *valuep = readb(address); | |
586 | return 0; | |
587 | case PCI_SIZE_16: | |
588 | *valuep = readw(address); | |
589 | return 0; | |
590 | case PCI_SIZE_32: | |
591 | *valuep = readl(address); | |
592 | return 0; | |
593 | default: | |
594 | return -EINVAL; | |
595 | } | |
596 | } | |
597 | ||
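A sketch of how a controller driver might plug into the generic memory-mapped helpers above; the ECAM layout, my_ecam_addr() and the priv usage are assumptions for illustration, not part of this file:

/* Turn bdf/offset into an address inside a mapped ECAM-style config window */
static int my_ecam_addr(struct udevice *bus, pci_dev_t bdf, uint offset,
			void **addrp)
{
	u8 *cfg_base = dev_get_priv(bus);	/* assumed: priv holds the base */

	*addrp = cfg_base + (PCI_BUS(bdf) << 20) + (PCI_DEV(bdf) << 15) +
		 (PCI_FUNC(bdf) << 12) + offset;

	return 0;
}

static int my_pcie_read_config(struct udevice *bus, pci_dev_t bdf, uint offset,
			       ulong *valuep, enum pci_size_t size)
{
	return pci_generic_mmap_read_config(bus, my_ecam_addr, bdf, offset,
					    valuep, size);
}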
5e23b8b4 | 598 | int dm_pci_hose_probe_bus(struct udevice *bus) |
ff3e077b | 599 | { |
ff3e077b SG |
600 | int sub_bus; |
601 | int ret; | |
602 | ||
603 | debug("%s\n", __func__); | |
ff3e077b SG |
604 | |
605 | sub_bus = pci_get_bus_max() + 1; | |
606 | debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name); | |
5e23b8b4 | 607 | dm_pciauto_prescan_setup_bridge(bus, sub_bus); |
ff3e077b SG |
608 | |
609 | ret = device_probe(bus); | |
610 | if (ret) { | |
3129ace4 | 611 | debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name, |
ff3e077b SG |
612 | ret); |
613 | return ret; | |
614 | } | |
615 | if (sub_bus != bus->seq) { | |
616 | printf("%s: Internal error, bus '%s' got seq %d, expected %d\n", | |
617 | __func__, bus->name, bus->seq, sub_bus); | |
618 | return -EPIPE; | |
619 | } | |
620 | sub_bus = pci_get_bus_max(); | |
5e23b8b4 | 621 | dm_pciauto_postscan_setup_bridge(bus, sub_bus); |
ff3e077b SG |
622 | |
623 | return sub_bus; | |
624 | } | |
625 | ||
aba92962 SG |
626 | /** |
627 | * pci_match_one_id - Tell if a PCI device ID table entry matches the |
628 | * device ID being searched for |
629 | * @id: single PCI device id structure to match | |
0367bd4d | 630 | * @find: the PCI device id structure to match against |
aba92962 | 631 | * |
0367bd4d HZ |
632 | * Returns true if the @find device ID matches the @id table entry, or |
633 | * false if there is no match. |
aba92962 SG |
634 | */ |
635 | static bool pci_match_one_id(const struct pci_device_id *id, | |
636 | const struct pci_device_id *find) | |
637 | { | |
638 | if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) && | |
639 | (id->device == PCI_ANY_ID || id->device == find->device) && | |
640 | (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) && | |
641 | (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) && | |
642 | !((id->class ^ find->class) & id->class_mask)) | |
643 | return true; | |
644 | ||
645 | return false; | |
646 | } | |
647 | ||
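For context, a made-up example of the kind of ID table that ends up being matched here; drivers register such tables with the U_BOOT_PCI_DEVICE() macro, which creates the pci_driver_entry records scanned below (the IDs and driver name are placeholders):

static struct pci_device_id my_supported[] = {
	{ .vendor = 0x1234, .device = 0x5678 },
	{ },	/* all-zero sentinel terminates the table */
};

U_BOOT_PCI_DEVICE(my_driver, my_supported);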
648 | /** | |
649 | * pci_find_and_bind_driver() - Find and bind the right PCI driver | |
650 | * | |
651 | * This only looks at certain fields in the descriptor. | |
5dbcf3a0 SG |
652 | * |
653 | * @parent: Parent bus | |
654 | * @find_id: Specification of the driver to find | |
655 | * @bdf: Bus/device/function address - see PCI_BDF() |
656 | * @devp: Returns a pointer to the device created | |
657 | * @return 0 if OK, -EPERM if the device is not needed before relocation and | |
658 | * therefore was not created, other -ve value on error | |
aba92962 SG |
659 | */ |
660 | static int pci_find_and_bind_driver(struct udevice *parent, | |
5dbcf3a0 SG |
661 | struct pci_device_id *find_id, |
662 | pci_dev_t bdf, struct udevice **devp) | |
aba92962 SG |
663 | { |
664 | struct pci_driver_entry *start, *entry; | |
02e4d38d | 665 | ofnode node = ofnode_null(); |
aba92962 SG |
666 | const char *drv; |
667 | int n_ents; | |
668 | int ret; | |
669 | char name[30], *str; | |
08fc7b8f | 670 | bool bridge; |
aba92962 SG |
671 | |
672 | *devp = NULL; | |
673 | ||
674 | debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__, | |
675 | find_id->vendor, find_id->device); | |
02e4d38d MV |
676 | |
677 | /* Determine optional OF node */ | |
678 | pci_dev_find_ofnode(parent, bdf, &node); | |
679 | ||
a6cd597a MW |
680 | if (ofnode_valid(node) && !ofnode_is_available(node)) { |
681 | debug("%s: Ignoring disabled device\n", __func__); | |
682 | return -EPERM; | |
683 | } | |
684 | ||
aba92962 SG |
685 | start = ll_entry_start(struct pci_driver_entry, pci_driver_entry); |
686 | n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry); | |
687 | for (entry = start; entry != start + n_ents; entry++) { | |
688 | const struct pci_device_id *id; | |
689 | struct udevice *dev; | |
690 | const struct driver *drv; | |
691 | ||
692 | for (id = entry->match; | |
693 | id->vendor || id->subvendor || id->class_mask; | |
694 | id++) { | |
695 | if (!pci_match_one_id(id, find_id)) | |
696 | continue; | |
697 | ||
698 | drv = entry->driver; | |
08fc7b8f BM |
699 | |
700 | /* | |
701 | * In the pre-relocation phase, we only bind devices | |
702 | * whose driver has the DM_FLAG_PRE_RELOC set, to save | |
703 | * precious memory space, as on some platforms that |
704 | * space is pretty limited (i.e. using Cache As RAM). |
705 | */ | |
706 | if (!(gd->flags & GD_FLG_RELOC) && | |
707 | !(drv->flags & DM_FLAG_PRE_RELOC)) | |
5dbcf3a0 | 708 | return -EPERM; |
08fc7b8f | 709 | |
aba92962 SG |
710 | /* |
711 | * We could pass the descriptor to the driver as | |
712 | * platdata (instead of NULL) and allow its bind() | |
713 | * method to return -ENOENT if it doesn't support this | |
714 | * device. That way we could continue the search to | |
715 | * find another driver. For now this doesn't seem | |
716 | * necessary, so just bind the first match. |
717 | */ | |
02e4d38d MV |
718 | ret = device_bind_ofnode(parent, drv, drv->name, NULL, |
719 | node, &dev); | |
aba92962 SG |
720 | if (ret) |
721 | goto error; | |
722 | debug("%s: Match found: %s\n", __func__, drv->name); | |
ed698aa7 | 723 | dev->driver_data = id->driver_data; |
aba92962 SG |
724 | *devp = dev; |
725 | return 0; | |
726 | } | |
727 | } | |
728 | ||
08fc7b8f BM |
729 | bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI; |
730 | /* | |
731 | * In the pre-relocation phase, we only bind bridge devices to save | |
732 | * precious memory space, as on some platforms that space is pretty |
733 | * limited (i.e. using Cache As RAM). |
734 | */ | |
735 | if (!(gd->flags & GD_FLG_RELOC) && !bridge) | |
5dbcf3a0 | 736 | return -EPERM; |
08fc7b8f | 737 | |
aba92962 | 738 | /* Bind a generic driver so that the device can be used */ |
4d8615cb BM |
739 | sprintf(name, "pci_%x:%x.%x", parent->seq, PCI_DEV(bdf), |
740 | PCI_FUNC(bdf)); | |
aba92962 SG |
741 | str = strdup(name); |
742 | if (!str) | |
743 | return -ENOMEM; | |
08fc7b8f BM |
744 | drv = bridge ? "pci_bridge_drv" : "pci_generic_drv"; |
745 | ||
02e4d38d | 746 | ret = device_bind_driver_to_node(parent, drv, str, node, devp); |
aba92962 | 747 | if (ret) { |
3129ace4 | 748 | debug("%s: Failed to bind generic driver: %d\n", __func__, ret); |
c42640c7 | 749 | free(str); |
aba92962 SG |
750 | return ret; |
751 | } | |
752 | debug("%s: No match found: bound generic driver instead\n", __func__); | |
753 | ||
754 | return 0; | |
755 | ||
756 | error: | |
757 | debug("%s: No match found: error %d\n", __func__, ret); | |
758 | return ret; | |
759 | } | |
760 | ||
ff3e077b SG |
761 | int pci_bind_bus_devices(struct udevice *bus) |
762 | { | |
763 | ulong vendor, device; | |
764 | ulong header_type; | |
4d8615cb | 765 | pci_dev_t bdf, end; |
ff3e077b SG |
766 | bool found_multi; |
767 | int ret; | |
768 | ||
769 | found_multi = false; | |
4d8615cb BM |
770 | end = PCI_BDF(bus->seq, PCI_MAX_PCI_DEVICES - 1, |
771 | PCI_MAX_PCI_FUNCTIONS - 1); | |
6d9f5b03 | 772 | for (bdf = PCI_BDF(bus->seq, 0, 0); bdf <= end; |
4d8615cb | 773 | bdf += PCI_BDF(0, 0, 1)) { |
ff3e077b SG |
774 | struct pci_child_platdata *pplat; |
775 | struct udevice *dev; | |
776 | ulong class; | |
777 | ||
64e45f73 BM |
778 | if (!PCI_FUNC(bdf)) |
779 | found_multi = false; | |
4d8615cb | 780 | if (PCI_FUNC(bdf) && !found_multi) |
ff3e077b | 781 | continue; |
2a87f7fd | 782 | |
ff3e077b | 783 | /* Check only the first access, we don't expect problems */ |
2a87f7fd HZ |
784 | ret = pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor, |
785 | PCI_SIZE_16); | |
ff3e077b SG |
786 | if (ret) |
787 | goto error; | |
2a87f7fd | 788 | |
ff3e077b SG |
789 | if (vendor == 0xffff || vendor == 0x0000) |
790 | continue; | |
791 | ||
2a87f7fd HZ |
792 | pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE, |
793 | &header_type, PCI_SIZE_8); | |
794 | ||
4d8615cb | 795 | if (!PCI_FUNC(bdf)) |
ff3e077b SG |
796 | found_multi = header_type & 0x80; |
797 | ||
0911569b | 798 | debug("%s: bus %d/%s: found device %x, function %d", __func__, |
4d8615cb BM |
799 | bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf)); |
800 | pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device, | |
ff3e077b | 801 | PCI_SIZE_16); |
4d8615cb | 802 | pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class, |
aba92962 SG |
803 | PCI_SIZE_32); |
804 | class >>= 8; | |
ff3e077b SG |
805 | |
806 | /* Find this device in the device tree */ | |
4d8615cb | 807 | ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev); |
0911569b | 808 | debug(": find ret=%d\n", ret); |
ff3e077b | 809 | |
8bd42525 | 810 | /* If nothing in the device tree, bind a device */ |
ff3e077b | 811 | if (ret == -ENODEV) { |
aba92962 SG |
812 | struct pci_device_id find_id; |
813 | ulong val; | |
814 | ||
815 | memset(&find_id, '\0', sizeof(find_id)); | |
816 | find_id.vendor = vendor; | |
817 | find_id.device = device; | |
818 | find_id.class = class; | |
819 | if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) { | |
4d8615cb | 820 | pci_bus_read_config(bus, bdf, |
aba92962 SG |
821 | PCI_SUBSYSTEM_VENDOR_ID, |
822 | &val, PCI_SIZE_32); | |
823 | find_id.subvendor = val & 0xffff; | |
824 | find_id.subdevice = val >> 16; | |
825 | } | |
4d8615cb | 826 | ret = pci_find_and_bind_driver(bus, &find_id, bdf, |
aba92962 | 827 | &dev); |
ff3e077b | 828 | } |
5dbcf3a0 SG |
829 | if (ret == -EPERM) |
830 | continue; | |
831 | else if (ret) | |
ff3e077b SG |
832 | return ret; |
833 | ||
834 | /* Update the platform data */ | |
5dbcf3a0 SG |
835 | pplat = dev_get_parent_platdata(dev); |
836 | pplat->devfn = PCI_MASK_BUS(bdf); | |
837 | pplat->vendor = vendor; | |
838 | pplat->device = device; | |
839 | pplat->class = class; | |
ff3e077b SG |
840 | } |
841 | ||
842 | return 0; | |
843 | error: | |
844 | printf("Cannot read bus configuration: %d\n", ret); | |
845 | ||
846 | return ret; | |
847 | } | |
848 | ||
f2825f6e CG |
849 | static void decode_regions(struct pci_controller *hose, ofnode parent_node, |
850 | ofnode node) | |
ff3e077b SG |
851 | { |
852 | int pci_addr_cells, addr_cells, size_cells; | |
853 | int cells_per_record; | |
854 | const u32 *prop; | |
855 | int len; | |
856 | int i; | |
857 | ||
61e51bab | 858 | prop = ofnode_get_property(node, "ranges", &len); |
f2825f6e CG |
859 | if (!prop) { |
860 | debug("%s: Cannot decode regions\n", __func__); | |
861 | return; | |
862 | } | |
863 | ||
878d68c0 SG |
864 | pci_addr_cells = ofnode_read_simple_addr_cells(node); |
865 | addr_cells = ofnode_read_simple_addr_cells(parent_node); | |
866 | size_cells = ofnode_read_simple_size_cells(node); | |
ff3e077b SG |
867 | |
868 | /* PCI addresses are always 3-cells */ | |
869 | len /= sizeof(u32); | |
870 | cells_per_record = pci_addr_cells + addr_cells + size_cells; | |
871 | hose->region_count = 0; | |
872 | debug("%s: len=%d, cells_per_record=%d\n", __func__, len, | |
873 | cells_per_record); | |
874 | for (i = 0; i < MAX_PCI_REGIONS; i++, len -= cells_per_record) { | |
875 | u64 pci_addr, addr, size; | |
876 | int space_code; | |
877 | u32 flags; | |
878 | int type; | |
9526d83a | 879 | int pos; |
ff3e077b SG |
880 | |
881 | if (len < cells_per_record) | |
882 | break; | |
883 | flags = fdt32_to_cpu(prop[0]); | |
884 | space_code = (flags >> 24) & 3; | |
885 | pci_addr = fdtdec_get_number(prop + 1, 2); | |
886 | prop += pci_addr_cells; | |
887 | addr = fdtdec_get_number(prop, addr_cells); | |
888 | prop += addr_cells; | |
889 | size = fdtdec_get_number(prop, size_cells); | |
890 | prop += size_cells; | |
dee37fc9 MY |
891 | debug("%s: region %d, pci_addr=%llx, addr=%llx, size=%llx, space_code=%d\n", |
892 | __func__, hose->region_count, pci_addr, addr, size, space_code); | |
ff3e077b SG |
893 | if (space_code & 2) { |
894 | type = flags & (1U << 30) ? PCI_REGION_PREFETCH : | |
895 | PCI_REGION_MEM; | |
896 | } else if (space_code & 1) { | |
897 | type = PCI_REGION_IO; | |
898 | } else { | |
899 | continue; | |
900 | } | |
52ba9073 TT |
901 | |
902 | if (!IS_ENABLED(CONFIG_SYS_PCI_64BIT) && | |
903 | type == PCI_REGION_MEM && upper_32_bits(pci_addr)) { | |
904 | debug(" - beyond the 32-bit boundary, ignoring\n"); | |
905 | continue; | |
906 | } | |
907 | ||
9526d83a SG |
908 | pos = -1; |
909 | for (i = 0; i < hose->region_count; i++) { | |
910 | if (hose->regions[i].flags == type) | |
911 | pos = i; | |
912 | } | |
913 | if (pos == -1) | |
914 | pos = hose->region_count++; | |
915 | debug(" - type=%d, pos=%d\n", type, pos); | |
916 | pci_set_region(hose->regions + pos, pci_addr, addr, size, type); | |
ff3e077b SG |
917 | } |
918 | ||
919 | /* Add a region for our local memory */ | |
664758c3 BM |
920 | #ifdef CONFIG_NR_DRAM_BANKS |
921 | bd_t *bd = gd->bd; | |
922 | ||
1eaf7800 | 923 | if (!bd) |
f2825f6e | 924 | return; |
1eaf7800 | 925 | |
664758c3 | 926 | for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) { |
d94d9aa6 TR |
927 | if (hose->region_count == MAX_PCI_REGIONS) { |
928 | pr_err("maximum number of regions parsed, aborting\n"); | |
929 | break; | |
930 | } | |
931 | ||
664758c3 BM |
932 | if (bd->bi_dram[i].size) { |
933 | pci_set_region(hose->regions + hose->region_count++, | |
934 | bd->bi_dram[i].start, | |
935 | bd->bi_dram[i].start, | |
936 | bd->bi_dram[i].size, | |
937 | PCI_REGION_MEM | PCI_REGION_SYS_MEMORY); | |
938 | } | |
939 | } | |
940 | #else | |
941 | phys_addr_t base = 0, size; | |
942 | ||
2084c5af SG |
943 | size = gd->ram_size; |
944 | #ifdef CONFIG_SYS_SDRAM_BASE | |
945 | base = CONFIG_SYS_SDRAM_BASE; | |
946 | #endif | |
947 | if (gd->pci_ram_top && gd->pci_ram_top < base + size) | |
948 | size = gd->pci_ram_top - base; | |
ee1109bb BM |
949 | if (size) |
950 | pci_set_region(hose->regions + hose->region_count++, base, | |
951 | base, size, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY); | |
664758c3 | 952 | #endif |
ff3e077b | 953 | |
f2825f6e | 954 | return; |
ff3e077b SG |
955 | } |
956 | ||
957 | static int pci_uclass_pre_probe(struct udevice *bus) | |
958 | { | |
959 | struct pci_controller *hose; | |
ff3e077b SG |
960 | |
961 | debug("%s, bus=%d/%s, parent=%s\n", __func__, bus->seq, bus->name, | |
962 | bus->parent->name); | |
963 | hose = bus->uclass_priv; | |
964 | ||
965 | /* For bridges, use the top-level PCI controller */ | |
65f62b1c | 966 | if (!device_is_on_pci_bus(bus)) { |
ff3e077b | 967 | hose->ctlr = bus; |
f2825f6e | 968 | decode_regions(hose, dev_ofnode(bus->parent), dev_ofnode(bus)); |
ff3e077b SG |
969 | } else { |
970 | struct pci_controller *parent_hose; | |
971 | ||
972 | parent_hose = dev_get_uclass_priv(bus->parent); | |
973 | hose->ctlr = parent_hose->bus; | |
974 | } | |
975 | hose->bus = bus; | |
976 | hose->first_busno = bus->seq; | |
977 | hose->last_busno = bus->seq; | |
2206ac24 SG |
978 | hose->skip_auto_config_until_reloc = |
979 | dev_read_bool(bus, "u-boot,skip-auto-config-until-reloc"); | |
ff3e077b SG |
980 | |
981 | return 0; | |
982 | } | |
983 | ||
984 | static int pci_uclass_post_probe(struct udevice *bus) | |
985 | { | |
2206ac24 | 986 | struct pci_controller *hose = dev_get_uclass_priv(bus); |
ff3e077b SG |
987 | int ret; |
988 | ||
ff3e077b SG |
989 | debug("%s: probing bus %d\n", __func__, bus->seq); |
990 | ret = pci_bind_bus_devices(bus); | |
991 | if (ret) | |
992 | return ret; | |
993 | ||
2206ac24 SG |
994 | if (CONFIG_IS_ENABLED(PCI_PNP) && |
995 | (!hose->skip_auto_config_until_reloc || | |
996 | (gd->flags & GD_FLG_RELOC))) { | |
997 | ret = pci_auto_config_devices(bus); | |
998 | if (ret < 0) | |
999 | return log_msg_ret("pci auto-config", ret); | |
1000 | } | |
ff3e077b | 1001 | |
348b744b BM |
1002 | #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP) |
1003 | /* | |
1004 | * Per Intel FSP specification, we should call FSP notify API to | |
1005 | * inform FSP that PCI enumeration has been done so that FSP will | |
1006 | * do any necessary initialization as required by the chipset's | |
1007 | * BIOS Writer's Guide (BWG). | |
1008 | * | |
1009 | * Unfortunately we have to put this call here as with driver model, | |
1010 | * the enumeration is all done on a lazy basis as needed, so until | |
1011 | * something is touched on PCI it won't happen. | |
1012 | * | |
1013 | * Note we only call this 1) after U-Boot is relocated, and 2) | |
1014 | * root bus has finished probing. | |
1015 | */ | |
4d21455e | 1016 | if ((gd->flags & GD_FLG_RELOC) && (bus->seq == 0)) { |
348b744b | 1017 | ret = fsp_init_phase_pci(); |
4d21455e SG |
1018 | if (ret) |
1019 | return ret; | |
1020 | } | |
348b744b BM |
1021 | #endif |
1022 | ||
4d21455e | 1023 | return 0; |
ff3e077b SG |
1024 | } |
1025 | ||
1026 | static int pci_uclass_child_post_bind(struct udevice *dev) | |
1027 | { | |
1028 | struct pci_child_platdata *pplat; | |
ff3e077b | 1029 | |
bf501595 | 1030 | if (!dev_of_valid(dev)) |
ff3e077b SG |
1031 | return 0; |
1032 | ||
ff3e077b | 1033 | pplat = dev_get_parent_platdata(dev); |
1f6b08b9 BM |
1034 | |
1035 | /* Extract vendor id and device id if available */ | |
1036 | ofnode_read_pci_vendev(dev_ofnode(dev), &pplat->vendor, &pplat->device); | |
1037 | ||
1038 | /* Extract the devfn from fdt_pci_addr */ | |
b5214200 | 1039 | pplat->devfn = pci_get_devfn(dev); |
ff3e077b SG |
1040 | |
1041 | return 0; | |
1042 | } | |
1043 | ||
4d8615cb BM |
1044 | static int pci_bridge_read_config(struct udevice *bus, pci_dev_t bdf, |
1045 | uint offset, ulong *valuep, | |
1046 | enum pci_size_t size) | |
ff3e077b SG |
1047 | { |
1048 | struct pci_controller *hose = bus->uclass_priv; | |
ff3e077b SG |
1049 | |
1050 | return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size); | |
1051 | } | |
1052 | ||
4d8615cb BM |
1053 | static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf, |
1054 | uint offset, ulong value, | |
1055 | enum pci_size_t size) | |
ff3e077b SG |
1056 | { |
1057 | struct pci_controller *hose = bus->uclass_priv; | |
ff3e077b SG |
1058 | |
1059 | return pci_bus_write_config(hose->ctlr, bdf, offset, value, size); | |
1060 | } | |
1061 | ||
76c3fbcd SG |
1062 | static int skip_to_next_device(struct udevice *bus, struct udevice **devp) |
1063 | { | |
1064 | struct udevice *dev; | |
1065 | int ret = 0; | |
1066 | ||
1067 | /* | |
1068 | * Scan through all the PCI controllers. On x86 there will only be one | |
1069 | * but that is not necessarily true on other hardware. | |
1070 | */ | |
1071 | do { | |
1072 | device_find_first_child(bus, &dev); | |
1073 | if (dev) { | |
1074 | *devp = dev; | |
1075 | return 0; | |
1076 | } | |
1077 | ret = uclass_next_device(&bus); | |
1078 | if (ret) | |
1079 | return ret; | |
1080 | } while (bus); | |
1081 | ||
1082 | return 0; | |
1083 | } | |
1084 | ||
1085 | int pci_find_next_device(struct udevice **devp) | |
1086 | { | |
1087 | struct udevice *child = *devp; | |
1088 | struct udevice *bus = child->parent; | |
1089 | int ret; | |
1090 | ||
1091 | /* First try all the siblings */ | |
1092 | *devp = NULL; | |
1093 | while (child) { | |
1094 | device_find_next_child(&child); | |
1095 | if (child) { | |
1096 | *devp = child; | |
1097 | return 0; | |
1098 | } | |
1099 | } | |
1100 | ||
1101 | /* We ran out of siblings. Try the next bus */ | |
1102 | ret = uclass_next_device(&bus); | |
1103 | if (ret) | |
1104 | return ret; | |
1105 | ||
1106 | return bus ? skip_to_next_device(bus, devp) : 0; | |
1107 | } | |
1108 | ||
1109 | int pci_find_first_device(struct udevice **devp) | |
1110 | { | |
1111 | struct udevice *bus; | |
1112 | int ret; | |
1113 | ||
1114 | *devp = NULL; | |
1115 | ret = uclass_first_device(UCLASS_PCI, &bus); | |
1116 | if (ret) | |
1117 | return ret; | |
1118 | ||
1119 | return skip_to_next_device(bus, devp); | |
1120 | } | |
1121 | ||
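An illustrative loop (not from this file) that visits every bound PCI device with these two iterators, much like dm_pci_find_class() above does:

	struct udevice *dev;

	for (pci_find_first_device(&dev); dev; pci_find_next_device(&dev)) {
		struct pci_child_platdata *pplat = dev_get_parent_platdata(dev);

		printf("%04x:%04x %s\n", pplat->vendor, pplat->device,
		       dev->name);
	}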
9289db6c SG |
1122 | ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size) |
1123 | { | |
1124 | switch (size) { | |
1125 | case PCI_SIZE_8: | |
1126 | return (value >> ((offset & 3) * 8)) & 0xff; | |
1127 | case PCI_SIZE_16: | |
1128 | return (value >> ((offset & 2) * 8)) & 0xffff; | |
1129 | default: | |
1130 | return value; | |
1131 | } | |
1132 | } | |
1133 | ||
1134 | ulong pci_conv_size_to_32(ulong old, ulong value, uint offset, | |
1135 | enum pci_size_t size) | |
1136 | { | |
1137 | uint off_mask; | |
1138 | uint val_mask, shift; | |
1139 | ulong ldata, mask; | |
1140 | ||
1141 | switch (size) { | |
1142 | case PCI_SIZE_8: | |
1143 | off_mask = 3; | |
1144 | val_mask = 0xff; | |
1145 | break; | |
1146 | case PCI_SIZE_16: | |
1147 | off_mask = 2; | |
1148 | val_mask = 0xffff; | |
1149 | break; | |
1150 | default: | |
1151 | return value; | |
1152 | } | |
1153 | shift = (offset & off_mask) * 8; | |
1154 | ldata = (value & val_mask) << shift; | |
1155 | mask = val_mask << shift; | |
1156 | value = (old & ~mask) | ldata; | |
1157 | ||
1158 | return value; | |
1159 | } | |
1160 | ||
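A brief worked example (values chosen purely for illustration) of what these two conversions do for an 8-bit access at offset 0x3d, which lives inside the 32-bit dword read from offset 0x3c:

	/* Extract the byte at 0x3d (PCI_INTERRUPT_PIN) from the dword at 0x3c */
	ulong pin = pci_conv_32_to_size(0x00000100, 0x3d, PCI_SIZE_8);	/* 0x01 */

	/* Merge a new byte value back into that dword */
	ulong dword = pci_conv_size_to_32(0x00000100, 0x02, 0x3d, PCI_SIZE_8);
	/* dword == 0x00000200 */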
f9260336 SG |
1161 | int pci_get_regions(struct udevice *dev, struct pci_region **iop, |
1162 | struct pci_region **memp, struct pci_region **prefp) | |
1163 | { | |
1164 | struct udevice *bus = pci_get_controller(dev); | |
1165 | struct pci_controller *hose = dev_get_uclass_priv(bus); | |
1166 | int i; | |
1167 | ||
1168 | *iop = NULL; | |
1169 | *memp = NULL; | |
1170 | *prefp = NULL; | |
1171 | for (i = 0; i < hose->region_count; i++) { | |
1172 | switch (hose->regions[i].flags) { | |
1173 | case PCI_REGION_IO: | |
1174 | if (!*iop || (*iop)->size < hose->regions[i].size) | |
1175 | *iop = hose->regions + i; | |
1176 | break; | |
1177 | case PCI_REGION_MEM: | |
1178 | if (!*memp || (*memp)->size < hose->regions[i].size) | |
1179 | *memp = hose->regions + i; | |
1180 | break; | |
1181 | case (PCI_REGION_MEM | PCI_REGION_PREFETCH): | |
1182 | if (!*prefp || (*prefp)->size < hose->regions[i].size) | |
1183 | *prefp = hose->regions + i; | |
1184 | break; | |
1185 | } | |
1186 | } | |
1187 | ||
1188 | return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL); | |
1189 | } | |
1190 | ||
bab17cf1 SG |
1191 | u32 dm_pci_read_bar32(struct udevice *dev, int barnum) |
1192 | { | |
1193 | u32 addr; | |
1194 | int bar; | |
1195 | ||
1196 | bar = PCI_BASE_ADDRESS_0 + barnum * 4; | |
1197 | dm_pci_read_config32(dev, bar, &addr); | |
1198 | if (addr & PCI_BASE_ADDRESS_SPACE_IO) | |
1199 | return addr & PCI_BASE_ADDRESS_IO_MASK; | |
1200 | else | |
1201 | return addr & PCI_BASE_ADDRESS_MEM_MASK; | |
1202 | } | |
1203 | ||
9d731c82 SG |
1204 | void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr) |
1205 | { | |
1206 | int bar; | |
1207 | ||
1208 | bar = PCI_BASE_ADDRESS_0 + barnum * 4; | |
1209 | dm_pci_write_config32(dev, bar, addr); | |
1210 | } | |
1211 | ||
21d1fe7e SG |
1212 | static int _dm_pci_bus_to_phys(struct udevice *ctlr, |
1213 | pci_addr_t bus_addr, unsigned long flags, | |
1214 | unsigned long skip_mask, phys_addr_t *pa) | |
1215 | { | |
1216 | struct pci_controller *hose = dev_get_uclass_priv(ctlr); | |
1217 | struct pci_region *res; | |
1218 | int i; | |
1219 | ||
6f95d89c CG |
1220 | if (hose->region_count == 0) { |
1221 | *pa = bus_addr; | |
1222 | return 0; | |
1223 | } | |
1224 | ||
21d1fe7e SG |
1225 | for (i = 0; i < hose->region_count; i++) { |
1226 | res = &hose->regions[i]; | |
1227 | ||
1228 | if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) | |
1229 | continue; | |
1230 | ||
1231 | if (res->flags & skip_mask) | |
1232 | continue; | |
1233 | ||
1234 | if (bus_addr >= res->bus_start && | |
1235 | (bus_addr - res->bus_start) < res->size) { | |
1236 | *pa = (bus_addr - res->bus_start + res->phys_start); | |
1237 | return 0; | |
1238 | } | |
1239 | } | |
1240 | ||
1241 | return 1; | |
1242 | } | |
1243 | ||
1244 | phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr, | |
1245 | unsigned long flags) | |
1246 | { | |
1247 | phys_addr_t phys_addr = 0; | |
1248 | struct udevice *ctlr; | |
1249 | int ret; | |
1250 | ||
1251 | /* The root controller has the region information */ | |
1252 | ctlr = pci_get_controller(dev); | |
1253 | ||
1254 | /* | |
1255 | * if PCI_REGION_MEM is set we do a two pass search with preference | |
1256 | * on matches that don't have PCI_REGION_SYS_MEMORY set | |
1257 | */ | |
1258 | if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { | |
1259 | ret = _dm_pci_bus_to_phys(ctlr, bus_addr, | |
1260 | flags, PCI_REGION_SYS_MEMORY, | |
1261 | &phys_addr); | |
1262 | if (!ret) | |
1263 | return phys_addr; | |
1264 | } | |
1265 | ||
1266 | ret = _dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr); | |
1267 | ||
1268 | if (ret) | |
1269 | puts("pci_hose_bus_to_phys: invalid physical address\n"); | |
1270 | ||
1271 | return phys_addr; | |
1272 | } | |
1273 | ||
1274 | int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr, | |
1275 | unsigned long flags, unsigned long skip_mask, | |
1276 | pci_addr_t *ba) | |
1277 | { | |
1278 | struct pci_region *res; | |
1279 | struct udevice *ctlr; | |
1280 | pci_addr_t bus_addr; | |
1281 | int i; | |
1282 | struct pci_controller *hose; | |
1283 | ||
1284 | /* The root controller has the region information */ | |
1285 | ctlr = pci_get_controller(dev); | |
1286 | hose = dev_get_uclass_priv(ctlr); | |
1287 | ||
6f95d89c CG |
1288 | if (hose->region_count == 0) { |
1289 | *ba = phys_addr; | |
1290 | return 0; | |
1291 | } | |
1292 | ||
21d1fe7e SG |
1293 | for (i = 0; i < hose->region_count; i++) { |
1294 | res = &hose->regions[i]; | |
1295 | ||
1296 | if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) | |
1297 | continue; | |
1298 | ||
1299 | if (res->flags & skip_mask) | |
1300 | continue; | |
1301 | ||
1302 | bus_addr = phys_addr - res->phys_start + res->bus_start; | |
1303 | ||
1304 | if (bus_addr >= res->bus_start && | |
1305 | (bus_addr - res->bus_start) < res->size) { | |
1306 | *ba = bus_addr; | |
1307 | return 0; | |
1308 | } | |
1309 | } | |
1310 | ||
1311 | return 1; | |
1312 | } | |
1313 | ||
1314 | pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr, | |
1315 | unsigned long flags) | |
1316 | { | |
1317 | pci_addr_t bus_addr = 0; | |
1318 | int ret; | |
1319 | ||
1320 | /* | |
1321 | * if PCI_REGION_MEM is set we do a two pass search with preference | |
1322 | * on matches that don't have PCI_REGION_SYS_MEMORY set | |
1323 | */ | |
1324 | if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { | |
1325 | ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, | |
1326 | PCI_REGION_SYS_MEMORY, &bus_addr); | |
1327 | if (!ret) | |
1328 | return bus_addr; | |
1329 | } | |
1330 | ||
1331 | ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr); | |
1332 | ||
1333 | if (ret) | |
1334 | puts("pci_hose_phys_to_bus: invalid physical address\n"); | |
1335 | ||
1336 | return bus_addr; | |
1337 | } | |
1338 | ||
0b143d8a AM |
1339 | static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, int flags, |
1340 | int ea_off) | |
1341 | { | |
1342 | int ea_cnt, i, entry_size; | |
1343 | int bar_id = (bar - PCI_BASE_ADDRESS_0) >> 2; | |
1344 | u32 ea_entry; | |
1345 | phys_addr_t addr; | |
1346 | ||
1347 | /* EA capability structure header */ | |
1348 | dm_pci_read_config32(dev, ea_off, &ea_entry); | |
1349 | ea_cnt = (ea_entry >> 16) & PCI_EA_NUM_ENT_MASK; | |
1350 | ea_off += PCI_EA_FIRST_ENT; | |
1351 | ||
1352 | for (i = 0; i < ea_cnt; i++, ea_off += entry_size) { | |
1353 | /* Entry header */ | |
1354 | dm_pci_read_config32(dev, ea_off, &ea_entry); | |
1355 | entry_size = ((ea_entry & PCI_EA_ES) + 1) << 2; | |
1356 | ||
1357 | if (((ea_entry & PCI_EA_BEI) >> 4) != bar_id) | |
1358 | continue; | |
1359 | ||
1360 | /* Base address, 1st DW */ | |
1361 | dm_pci_read_config32(dev, ea_off + 4, &ea_entry); | |
1362 | addr = ea_entry & PCI_EA_FIELD_MASK; | |
1363 | if (ea_entry & PCI_EA_IS_64) { | |
1364 | /* Base address, 2nd DW, skip over 4B MaxOffset */ | |
1365 | dm_pci_read_config32(dev, ea_off + 12, &ea_entry); | |
1366 | addr |= ((u64)ea_entry) << 32; | |
1367 | } | |
1368 | ||
1369 | /* size ignored for now */ | |
1370 | return map_physmem(addr, flags, 0); | |
1371 | } | |
1372 | ||
1373 | return 0; | |
1374 | } | |
1375 | ||
21d1fe7e SG |
1376 | void *dm_pci_map_bar(struct udevice *dev, int bar, int flags) |
1377 | { | |
1378 | pci_addr_t pci_bus_addr; | |
1379 | u32 bar_response; | |
0b143d8a AM |
1380 | int ea_off; |
1381 | ||
1382 | /* | |
1383 | * if the function supports Enhanced Allocation use that instead of | |
1384 | * BARs | |
1385 | */ | |
1386 | ea_off = dm_pci_find_capability(dev, PCI_CAP_ID_EA); | |
1387 | if (ea_off) | |
1388 | return dm_pci_map_ea_bar(dev, bar, flags, ea_off); | |
21d1fe7e SG |
1389 | |
1390 | /* read BAR address */ | |
1391 | dm_pci_read_config32(dev, bar, &bar_response); | |
1392 | pci_bus_addr = (pci_addr_t)(bar_response & ~0xf); | |
1393 | ||
1394 | /* | |
1395 | * Pass "0" as the length argument to pci_bus_to_virt. The arg | |
1396 | * isn't actually used on any platform because U-Boot assumes a static |
1397 | * linear mapping. In the future, this could read the BAR size | |
1398 | * and pass that as the size if needed. | |
1399 | */ | |
1400 | return dm_pci_bus_to_virt(dev, pci_bus_addr, flags, 0, MAP_NOCACHE); | |
1401 | } | |
1402 | ||
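Typical driver usage, shown only as a sketch (MY_CTRL_REG is a made-up register offset): map the first memory BAR and access device registers through the returned pointer, with PCI_REGION_MEM selecting the memory-space mapping:

	void *regs;

	regs = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
	if (!regs)
		return -EINVAL;

	writel(0, regs + MY_CTRL_REG);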
a8c5f8d3 | 1403 | static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap) |
dac01fd8 | 1404 | { |
dac01fd8 BM |
1405 | int ttl = PCI_FIND_CAP_TTL; |
1406 | u8 id; | |
1407 | u16 ent; | |
dac01fd8 BM |
1408 | |
1409 | dm_pci_read_config8(dev, pos, &pos); | |
a8c5f8d3 | 1410 | |
dac01fd8 BM |
1411 | while (ttl--) { |
1412 | if (pos < PCI_STD_HEADER_SIZEOF) | |
1413 | break; | |
1414 | pos &= ~3; | |
1415 | dm_pci_read_config16(dev, pos, &ent); | |
1416 | ||
1417 | id = ent & 0xff; | |
1418 | if (id == 0xff) | |
1419 | break; | |
1420 | if (id == cap) | |
1421 | return pos; | |
1422 | pos = (ent >> 8); | |
1423 | } | |
1424 | ||
1425 | return 0; | |
1426 | } | |
1427 | ||
a8c5f8d3 BM |
1428 | int dm_pci_find_next_capability(struct udevice *dev, u8 start, int cap) |
1429 | { | |
1430 | return _dm_pci_find_next_capability(dev, start + PCI_CAP_LIST_NEXT, | |
1431 | cap); | |
1432 | } | |
1433 | ||
1434 | int dm_pci_find_capability(struct udevice *dev, int cap) | |
1435 | { | |
1436 | u16 status; | |
1437 | u8 header_type; | |
1438 | u8 pos; | |
1439 | ||
1440 | dm_pci_read_config16(dev, PCI_STATUS, &status); | |
1441 | if (!(status & PCI_STATUS_CAP_LIST)) | |
1442 | return 0; | |
1443 | ||
1444 | dm_pci_read_config8(dev, PCI_HEADER_TYPE, &header_type); | |
1445 | if ((header_type & 0x7f) == PCI_HEADER_TYPE_CARDBUS) | |
1446 | pos = PCI_CB_CAPABILITY_LIST; | |
1447 | else | |
1448 | pos = PCI_CAPABILITY_LIST; | |
1449 | ||
1450 | return _dm_pci_find_next_capability(dev, pos, cap); | |
1451 | } | |
1452 | ||
1453 | int dm_pci_find_next_ext_capability(struct udevice *dev, int start, int cap) | |
dac01fd8 BM |
1454 | { |
1455 | u32 header; | |
1456 | int ttl; | |
1457 | int pos = PCI_CFG_SPACE_SIZE; | |
1458 | ||
1459 | /* minimum 8 bytes per capability */ | |
1460 | ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; | |
1461 | ||
a8c5f8d3 BM |
1462 | if (start) |
1463 | pos = start; | |
1464 | ||
dac01fd8 BM |
1465 | dm_pci_read_config32(dev, pos, &header); |
1466 | /* | |
1467 | * If we have no capabilities, this is indicated by cap ID, | |
1468 | * cap version and next pointer all being 0. | |
1469 | */ | |
1470 | if (header == 0) | |
1471 | return 0; | |
1472 | ||
1473 | while (ttl--) { | |
1474 | if (PCI_EXT_CAP_ID(header) == cap) | |
1475 | return pos; | |
1476 | ||
1477 | pos = PCI_EXT_CAP_NEXT(header); | |
1478 | if (pos < PCI_CFG_SPACE_SIZE) | |
1479 | break; | |
1480 | ||
1481 | dm_pci_read_config32(dev, pos, &header); | |
1482 | } | |
1483 | ||
1484 | return 0; | |
1485 | } | |
1486 | ||
a8c5f8d3 BM |
1487 | int dm_pci_find_ext_capability(struct udevice *dev, int cap) |
1488 | { | |
1489 | return dm_pci_find_next_ext_capability(dev, 0, cap); | |
1490 | } | |
1491 | ||
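A small usage sketch (assumed, not part of this file): locate the PCI Express capability and read a register relative to it, the same pattern dm_pci_flr() below uses:

	u32 devcap;
	int pcie_off;

	pcie_off = dm_pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pcie_off)
		return -ENOENT;	/* no PCI Express capability present */

	dm_pci_read_config32(dev, pcie_off + PCI_EXP_DEVCAP, &devcap);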
b8e1f827 AM |
1492 | int dm_pci_flr(struct udevice *dev) |
1493 | { | |
1494 | int pcie_off; | |
1495 | u32 cap; | |
1496 | ||
1497 | /* look for PCI Express Capability */ | |
1498 | pcie_off = dm_pci_find_capability(dev, PCI_CAP_ID_EXP); | |
1499 | if (!pcie_off) | |
1500 | return -ENOENT; | |
1501 | ||
1502 | /* check FLR capability */ | |
1503 | dm_pci_read_config32(dev, pcie_off + PCI_EXP_DEVCAP, &cap); | |
1504 | if (!(cap & PCI_EXP_DEVCAP_FLR)) | |
1505 | return -ENOENT; | |
1506 | ||
1507 | dm_pci_clrset_config16(dev, pcie_off + PCI_EXP_DEVCTL, 0, | |
1508 | PCI_EXP_DEVCTL_BCR_FLR); | |
1509 | ||
1510 | /* wait 100ms, per PCI spec */ | |
1511 | mdelay(100); | |
1512 | ||
1513 | return 0; | |
1514 | } | |
1515 | ||
ff3e077b SG |
1516 | UCLASS_DRIVER(pci) = { |
1517 | .id = UCLASS_PCI, | |
1518 | .name = "pci", | |
2bb02e4f | 1519 | .flags = DM_UC_FLAG_SEQ_ALIAS, |
91195485 | 1520 | .post_bind = dm_scan_fdt_dev, |
ff3e077b SG |
1521 | .pre_probe = pci_uclass_pre_probe, |
1522 | .post_probe = pci_uclass_post_probe, | |
1523 | .child_post_bind = pci_uclass_child_post_bind, | |
1524 | .per_device_auto_alloc_size = sizeof(struct pci_controller), | |
1525 | .per_child_platdata_auto_alloc_size = | |
1526 | sizeof(struct pci_child_platdata), | |
1527 | }; | |
1528 | ||
1529 | static const struct dm_pci_ops pci_bridge_ops = { | |
1530 | .read_config = pci_bridge_read_config, | |
1531 | .write_config = pci_bridge_write_config, | |
1532 | }; | |
1533 | ||
1534 | static const struct udevice_id pci_bridge_ids[] = { | |
1535 | { .compatible = "pci-bridge" }, | |
1536 | { } | |
1537 | }; | |
1538 | ||
1539 | U_BOOT_DRIVER(pci_bridge_drv) = { | |
1540 | .name = "pci_bridge_drv", | |
1541 | .id = UCLASS_PCI, | |
1542 | .of_match = pci_bridge_ids, | |
1543 | .ops = &pci_bridge_ops, | |
1544 | }; | |
1545 | ||
1546 | UCLASS_DRIVER(pci_generic) = { | |
1547 | .id = UCLASS_PCI_GENERIC, | |
1548 | .name = "pci_generic", | |
1549 | }; | |
1550 | ||
1551 | static const struct udevice_id pci_generic_ids[] = { | |
1552 | { .compatible = "pci-generic" }, | |
1553 | { } | |
1554 | }; | |
1555 | ||
1556 | U_BOOT_DRIVER(pci_generic_drv) = { | |
1557 | .name = "pci_generic_drv", | |
1558 | .id = UCLASS_PCI_GENERIC, | |
1559 | .of_match = pci_generic_ids, | |
1560 | }; | |
e578b92c SW |
1561 | |
1562 | void pci_init(void) | |
1563 | { | |
1564 | struct udevice *bus; | |
1565 | ||
1566 | /* | |
1567 | * Enumerate all known controller devices. Enumeration has the side- | |
1568 | * effect of probing them, so PCIe devices will be enumerated too. | |
1569 | */ | |
60ee6094 | 1570 | for (uclass_first_device_check(UCLASS_PCI, &bus); |
e578b92c | 1571 | bus; |
60ee6094 | 1572 | uclass_next_device_check(&bus)) { |
e578b92c SW |
1573 | ; |
1574 | } | |
1575 | } |