/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI has been initialized.
 * Basically, we consider PCI uninitialized when there is no device
 * to be found on the pci_bus_type bus.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
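
/*
 * Worked example (hypothetical device): a 1 MiB 32-bit memory BAR reads
 * back 0xfff00000 after all 1s have been written to it.  With
 * mask = PCI_BASE_ADDRESS_MEM_MASK (0xfffffff0):
 *
 *	size = 0xfffffff0 & 0xfff00000	= 0xfff00000
 *	size = (size & ~(size - 1)) - 1	= 0x00100000 - 1 = 0x000fffff
 *
 * so the BAR decodes 1 MiB; the value returned is the inclusive extent.
 */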

static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}
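
/*
 * Example (hypothetical BAR value): a BAR whose low bits are 0xc
 * (memory space, 64-bit type, prefetchable) decodes to
 * IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_MEM_64 plus the raw
 * attribute bits; a BAR with bit 0 set decodes as IORESOURCE_IO.
 */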

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * __pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l = 0, sz = 0, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
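
/*
 * Example (hypothetical bridge): with the default 4K granularity,
 * PCI_IO_BASE = 0x20 and PCI_IO_LIMIT = 0x30 decode the I/O window
 * [0x2000, 0x3fff]: base = (0x20 & 0xf0) << 8 = 0x2000 and the limit
 * (0x30 & 0xf0) << 8 = 0x3000, extended by io_granularity - 1.
 */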

static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
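
/*
 * Example (hypothetical bridge): PCI_MEMORY_BASE = 0xe000 and
 * PCI_MEMORY_LIMIT = 0xe1f0 decode the 1 MiB aligned window
 * [0xe0000000, 0xe1ffffff] (limit + 0xfffff).
 */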

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}

static void devm_pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	devm_pci_release_host_bridge_dev(dev);
	pci_free_host_bridge(to_pci_host_bridge(dev));
}

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(pci_alloc_host_bridge);

struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = devm_pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(devm_pci_alloc_host_bridge);

void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
EXPORT_SYMBOL(pci_free_host_bridge);

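/*
 * Illustrative sketch of how a host controller driver typically uses these
 * helpers; "port", "res" and "my_pcie_ops" are placeholder names, and
 * pci_scan_root_bus_bridge() is provided by the PCI core:
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
 *	if (!bridge)
 *		return -ENOMEM;
 *
 *	list_splice_init(&res, &bridge->windows);
 *	bridge->dev.parent = dev;
 *	bridge->sysdata = port;
 *	bridge->busnr = 0;
 *	bridge->ops = &my_pcie_ops;
 *
 *	ret = pci_scan_root_bus_bridge(bridge);
 */
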
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

out:
	return agp_speeds[index];
}
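
/*
 * Example: in AGP 3.0 mode (agp3 != 0), a status value with bit 1 set maps
 * to index 2 + 2 = 4, i.e. AGP_8X; bit 2 would yield index 5, which is out
 * of range for AGP 3.0 rates and is therefore reported as AGP_UNKNOWN.
 */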

static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}

static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}

static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}

static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
	struct device *parent = bridge->dev.parent;
	struct resource_entry *window, *n;
	struct pci_bus *bus, *b;
	resource_size_t offset;
	LIST_HEAD(resources);
	struct resource *res;
	char addr[64], *fmt;
	const char *name;
	int err;

	bus = pci_alloc_bus(NULL);
	if (!bus)
		return -ENOMEM;

	bridge->bus = bus;

	/* temporarily move resources off the list */
	list_splice_init(&bridge->windows, &resources);
	bus->sysdata = bridge->sysdata;
	bus->msi = bridge->msi;
	bus->ops = bridge->ops;
	bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
#endif

	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
	if (b) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b->dev, "bus already known\n");
		err = -EEXIST;
		goto free;
	}

	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
		     bridge->busnr);

	err = pcibios_root_bridge_prepare(bridge);
	if (err)
		goto free;

	err = device_register(&bridge->dev);
	if (err)
		put_device(&bridge->dev);

	bus->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(bus->bridge);
	pci_set_bus_of_node(bus);
	pci_set_bus_msi_domain(bus);

	if (!parent)
		set_dev_node(bus->bridge, pcibus_to_node(bus));

	bus->dev.class = &pcibus_class;
	bus->dev.parent = bus->bridge;

	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
	name = dev_name(&bus->dev);

	err = device_register(&bus->dev);
	if (err)
		goto unregister;

	pcibios_add_bus(bus);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(bus);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", name);
	else
		pr_info("PCI host bridge to bus %s\n", name);

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, &resources) {
		list_move_tail(&window->node, &bridge->windows);
		offset = window->offset;
		res = window->res;

		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(bus, bus->number, res->end);
		else
			pci_bus_add_resource(bus, res, 0);

		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";

			snprintf(addr, sizeof(addr), fmt,
				 (unsigned long long)(res->start - offset),
				 (unsigned long long)(res->end - offset));
		} else
			addr[0] = '\0';

		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&bus->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return 0;

unregister:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);

free:
	kfree(bus);
	return err;
}

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);

static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}

static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
					      unsigned int available_buses);

/*
 * pci_scan_bridge_extend() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @available_buses: Total number of buses available for this bridge and
 *		     the devices below.  After the minimal bus space has
 *		     been allocated the remaining buses will be
 *		     distributed equally between hotplug-capable bridges.
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *	  that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
				  int max, unsigned int available_buses,
				  int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/*
	 * Make sure the bridge is powered on to be able to access config
	 * space of devices below it.
	 */
	pm_runtime_get_sync(&dev->dev);

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1,
						bus->busn_res.end);
		}
		max++;
		if (available_buses)
			available_buses--;

		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)        <<  0)
		      | ((unsigned int)(child->busn_res.start) <<  8)
		      | ((unsigned int)(child->busn_res.end)   << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus_extend(child, available_buses);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	pm_runtime_put(&dev->dev);

	return max;
}

/*
 * pci_scan_bridge() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *	  that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	return pci_scan_bridge_extend(bus, dev, max, 0, pass);
}
EXPORT_SYMBOL(pci_scan_bridge);

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	int type;
	struct pci_dev *parent;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

	/*
	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
	 * of a Link.  No PCIe component has two Links.  Two Links are
	 * connected by a Switch that has a Port on each Link and internal
	 * logic to connect the two Ports.
	 */
	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT ||
	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
		pdev->has_secondary_link = 1;
	else if (type == PCI_EXP_TYPE_UPSTREAM ||
		 type == PCI_EXP_TYPE_DOWNSTREAM) {
		parent = pci_upstream_bridge(pdev);

		/*
		 * Usually there's an upstream device (Root Port or Switch
		 * Downstream Port), but we can't assume one exists.
		 */
		if (parent && !parent->has_secondary_link)
			pdev->has_secondary_link = 1;
	}
}
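
/*
 * Example: in a Root Port -> Switch Upstream Port -> Switch Downstream
 * Port -> Endpoint topology, the Root Port and the Downstream Port end up
 * with has_secondary_link set, matching the two Links in the hierarchy;
 * the Upstream Port, whose parent already owns a Link, does not.
 */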

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

static void set_pcie_thunderbolt(struct pci_dev *dev)
{
	int vsec = 0;
	u32 header;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						    PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);

		/* Is the device part of a Thunderbolt controller? */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
			dev->is_thunderbolt = 1;
			return;
		}
	}
}

/**
 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transaction and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		return PCI_CFG_SPACE_SIZE;
	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
		return PCI_CFG_SPACE_SIZE;

	return PCI_CFG_SPACE_EXP_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (pci_is_pcie(dev))
		return pci_cfg_space_size_ext(dev);

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return PCI_CFG_SPACE_SIZE;

	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
		return pci_cfg_space_size_ext(dev);

	return PCI_CFG_SPACE_SIZE;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}

/**
 * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
 * @dev: PCI device
 *
 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev.  Check this
 * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
 */
static int pci_intx_mask_broken(struct pci_dev *dev)
{
	u16 orig, toggle, new;

	pci_read_config_word(dev, PCI_COMMAND, &orig);
	toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, toggle);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	pci_write_config_word(dev, PCI_COMMAND, orig);

	/*
	 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
	 * r2.3, so strictly speaking, a device is not *broken* if it's not
	 * writable.  But we'll live with the misnomer for now.
	 */
	if (new != toggle)
		return 1;
	return 0;
}

1da177e4 LT |
1428 | /** |
1429 | * pci_setup_device - fill in class and map information of a device | |
1430 | * @dev: the device structure to fill | |
1431 | * | |
f7625980 | 1432 | * Initialize the device structure with information about the device's |
1da177e4 LT |
1433 | * vendor,class,memory and IO-space addresses,IRQ lines etc. |
1434 | * Called at initialisation of the PCI subsystem and by CardBus services. | |
480b93b7 YZ |
1435 | * Returns 0 on success and negative if unknown type of device (not normal, |
1436 | * bridge or CardBus). | |
1da177e4 | 1437 | */ |
480b93b7 | 1438 | int pci_setup_device(struct pci_dev *dev) |
1da177e4 LT |
1439 | { |
1440 | u32 class; | |
b84106b4 | 1441 | u16 cmd; |
480b93b7 | 1442 | u8 hdr_type; |
bc577d2b | 1443 | int pos = 0; |
5bfa14ed BH |
1444 | struct pci_bus_region region; |
1445 | struct resource *res; | |
480b93b7 YZ |
1446 | |
1447 | if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type)) | |
1448 | return -EIO; | |
1449 | ||
1450 | dev->sysdata = dev->bus->sysdata; | |
1451 | dev->dev.parent = dev->bus->bridge; | |
1452 | dev->dev.bus = &pci_bus_type; | |
1453 | dev->hdr_type = hdr_type & 0x7f; | |
1454 | dev->multifunction = !!(hdr_type & 0x80); | |
480b93b7 YZ |
1455 | dev->error_state = pci_channel_io_normal; |
1456 | set_pcie_port_type(dev); | |
1457 | ||
017ffe64 | 1458 | pci_dev_assign_slot(dev); |
480b93b7 YZ |
1459 | /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) |
1460 | set this higher, assuming the system even supports it. */ | |
1461 | dev->dma_mask = 0xffffffff; | |
1da177e4 | 1462 | |
eebfcfb5 GKH |
1463 | dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), |
1464 | dev->bus->number, PCI_SLOT(dev->devfn), | |
1465 | PCI_FUNC(dev->devfn)); | |
1da177e4 LT |
1466 | |
1467 | pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); | |
b8a3a521 | 1468 | dev->revision = class & 0xff; |
2dd8ba92 | 1469 | dev->class = class >> 8; /* upper 3 bytes */ |
1da177e4 | 1470 | |
2dd8ba92 YL |
1471 | dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n", |
1472 | dev->vendor, dev->device, dev->hdr_type, dev->class); | |
1da177e4 | 1473 | |
853346e4 YZ |
1474 | /* need to have dev->class ready */ |
1475 | dev->cfg_size = pci_cfg_space_size(dev); | |
1476 | ||
8531e283 LW |
1477 | /* need to have dev->cfg_size ready */ |
1478 | set_pcie_thunderbolt(dev); | |
1479 | ||
1da177e4 | 1480 | /* "Unknown power state" */ |
3fe9d19f | 1481 | dev->current_state = PCI_UNKNOWN; |
1da177e4 LT |
1482 | |
1483 | /* Early fixups, before probing the BARs */ | |
1484 | pci_fixup_device(pci_fixup_early, dev); | |
f79b1b14 YZ |
1485 | /* device class may be changed after fixup */ |
1486 | class = dev->class >> 8; | |
1da177e4 | 1487 | |
b84106b4 BH |
1488 | if (dev->non_compliant_bars) { |
1489 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | |
1490 | if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { | |
1491 | dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n"); | |
1492 | cmd &= ~PCI_COMMAND_IO; | |
1493 | cmd &= ~PCI_COMMAND_MEMORY; | |
1494 | pci_write_config_word(dev, PCI_COMMAND, cmd); | |
1495 | } | |
1496 | } | |
1497 | ||
99b3c58f PG |
1498 | dev->broken_intx_masking = pci_intx_mask_broken(dev); |
1499 | ||
1da177e4 LT |
1500 | switch (dev->hdr_type) { /* header type */ |
1501 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ | |
1502 | if (class == PCI_CLASS_BRIDGE_PCI) | |
1503 | goto bad; | |
1504 | pci_read_irq(dev); | |
1505 | pci_read_bases(dev, 6, PCI_ROM_ADDRESS); | |
1506 | pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor); | |
1507 | pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device); | |
368c73d4 AC |
1508 | |
1509 | /* | |
075eb9e3 BH |
1510 | * Do the ugly legacy mode stuff here rather than in the broken-chip | 
1511 | * quirk code. Legacy mode ATA controllers have fixed | |
1512 | * addresses. These are not always echoed in BAR0-3, and | |
1513 | * BAR0-3 in a few cases contain junk! | |
368c73d4 AC |
1514 | */ |
1515 | if (class == PCI_CLASS_STORAGE_IDE) { | |
1516 | u8 progif; | |
1517 | pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); | |
1518 | if ((progif & 1) == 0) { | |
5bfa14ed BH |
1519 | region.start = 0x1F0; |
1520 | region.end = 0x1F7; | |
1521 | res = &dev->resource[0]; | |
1522 | res->flags = LEGACY_IO_RESOURCE; | |
fc279850 | 1523 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
075eb9e3 BH |
1524 | dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n", |
1525 | res); | |
5bfa14ed BH |
1526 | region.start = 0x3F6; |
1527 | region.end = 0x3F6; | |
1528 | res = &dev->resource[1]; | |
1529 | res->flags = LEGACY_IO_RESOURCE; | |
fc279850 | 1530 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
075eb9e3 BH |
1531 | dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n", |
1532 | res); | |
368c73d4 AC |
1533 | } |
1534 | if ((progif & 4) == 0) { | |
5bfa14ed BH |
1535 | region.start = 0x170; |
1536 | region.end = 0x177; | |
1537 | res = &dev->resource[2]; | |
1538 | res->flags = LEGACY_IO_RESOURCE; | |
fc279850 | 1539 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
075eb9e3 BH |
1540 | dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n", |
1541 | res); | |
5bfa14ed BH |
1542 | region.start = 0x376; |
1543 | region.end = 0x376; | |
1544 | res = &dev->resource[3]; | |
1545 | res->flags = LEGACY_IO_RESOURCE; | |
fc279850 | 1546 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
075eb9e3 BH |
1547 | dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n", |
1548 | res); | |
368c73d4 AC |
1549 | } |
1550 | } | |
1da177e4 LT |
1551 | break; |
1552 | ||
1553 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ | |
1554 | if (class != PCI_CLASS_BRIDGE_PCI) | |
1555 | goto bad; | |
1556 | /* The PCI-to-PCI bridge spec requires that a subtractive | 
1557 | decoding (i.e. transparent) bridge have a programming | 
f7625980 | 1558 | interface code of 0x01. */ |
3efd273b | 1559 | pci_read_irq(dev); |
1da177e4 LT |
1560 | dev->transparent = ((dev->class & 0xff) == 1); |
1561 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); | |
28760489 | 1562 | set_pcie_hotplug_bridge(dev); |
bc577d2b GB |
1563 | pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); |
1564 | if (pos) { | |
1565 | pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor); | |
1566 | pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device); | |
1567 | } | |
1da177e4 LT |
1568 | break; |
1569 | ||
1570 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ | |
1571 | if (class != PCI_CLASS_BRIDGE_CARDBUS) | |
1572 | goto bad; | |
1573 | pci_read_irq(dev); | |
1574 | pci_read_bases(dev, 1, 0); | |
1575 | pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor); | |
1576 | pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device); | |
1577 | break; | |
1578 | ||
1579 | default: /* unknown header */ | |
227f0647 RD |
1580 | dev_err(&dev->dev, "unknown header type %02x, ignoring device\n", |
1581 | dev->hdr_type); | |
480b93b7 | 1582 | return -EIO; |
1da177e4 LT |
1583 | |
1584 | bad: | |
227f0647 RD |
1585 | dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n", |
1586 | dev->class, dev->hdr_type); | |
2b4aed1d | 1587 | dev->class = PCI_CLASS_NOT_DEFINED << 8; |
1da177e4 LT |
1588 | } |
1589 | ||
1590 | /* We found a fine healthy device, go go go... */ | |
1591 | return 0; | |
1592 | } | |
1593 | ||
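/*
 * Illustrative sketch, not part of this file: the fields decoded by
 * pci_setup_device() above come straight from the standard config header
 * and can be re-read with the ordinary accessors.  example_dump_ids() is
 * an assumed name; error handling is omitted.
 */
static void example_dump_ids(struct pci_dev *pdev)
{
	u8 hdr;
	u32 class_rev;

	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);

	dev_info(&pdev->dev, "hdr %#x (multifunction %d) class %#06x rev %#04x\n",
		 hdr & 0x7f, !!(hdr & 0x80), class_rev >> 8, class_rev & 0xff);
}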
9dae3a97 BH |
1594 | static void pci_configure_mps(struct pci_dev *dev) |
1595 | { | |
1596 | struct pci_dev *bridge = pci_upstream_bridge(dev); | |
27d868b5 | 1597 | int mps, p_mps, rc; |
9dae3a97 BH |
1598 | |
1599 | if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge)) | |
1600 | return; | |
1601 | ||
1602 | mps = pcie_get_mps(dev); | |
1603 | p_mps = pcie_get_mps(bridge); | |
1604 | ||
1605 | if (mps == p_mps) | |
1606 | return; | |
1607 | ||
1608 | if (pcie_bus_config == PCIE_BUS_TUNE_OFF) { | |
1609 | dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", | |
1610 | mps, pci_name(bridge), p_mps); | |
1611 | return; | |
1612 | } | |
27d868b5 KB |
1613 | |
1614 | /* | |
1615 | * Fancier MPS configuration is done later by | |
1616 | * pcie_bus_configure_settings() | |
1617 | */ | |
1618 | if (pcie_bus_config != PCIE_BUS_DEFAULT) | |
1619 | return; | |
1620 | ||
1621 | rc = pcie_set_mps(dev, p_mps); | |
1622 | if (rc) { | |
1623 | dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", | |
1624 | p_mps); | |
1625 | return; | |
1626 | } | |
1627 | ||
1628 | dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n", | |
1629 | p_mps, mps, 128 << dev->pcie_mpss); | |
9dae3a97 BH |
1630 | } |
1631 | ||
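/*
 * Illustrative sketch, not part of this file: pcie_get_mps()/pcie_set_mps()
 * used above are available to other callers as well.  A hypothetical caller
 * clamping a device to 256 bytes (or the smaller value the device
 * advertises) might do the following; example_clamp_mps() is an assumed name.
 */
static int example_clamp_mps(struct pci_dev *pdev)
{
	int mps = min(256, 128 << pdev->pcie_mpss);

	return pcie_set_mps(pdev, mps);		/* 0 on success */
}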
589fcc23 BH |
1632 | static struct hpp_type0 pci_default_type0 = { |
1633 | .revision = 1, | |
1634 | .cache_line_size = 8, | |
1635 | .latency_timer = 0x40, | |
1636 | .enable_serr = 0, | |
1637 | .enable_perr = 0, | |
1638 | }; | |
1639 | ||
1640 | static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) | |
1641 | { | |
1642 | u16 pci_cmd, pci_bctl; | |
1643 | ||
c6285fc5 | 1644 | if (!hpp) |
589fcc23 | 1645 | hpp = &pci_default_type0; |
589fcc23 BH |
1646 | |
1647 | if (hpp->revision > 1) { | |
1648 | dev_warn(&dev->dev, | |
1649 | "PCI settings rev %d not supported; using defaults\n", | |
1650 | hpp->revision); | |
1651 | hpp = &pci_default_type0; | |
1652 | } | |
1653 | ||
1654 | pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size); | |
1655 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer); | |
1656 | pci_read_config_word(dev, PCI_COMMAND, &pci_cmd); | |
1657 | if (hpp->enable_serr) | |
1658 | pci_cmd |= PCI_COMMAND_SERR; | |
589fcc23 BH |
1659 | if (hpp->enable_perr) |
1660 | pci_cmd |= PCI_COMMAND_PARITY; | |
589fcc23 BH |
1661 | pci_write_config_word(dev, PCI_COMMAND, pci_cmd); |
1662 | ||
1663 | /* Program bridge control value */ | |
1664 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | |
1665 | pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, | |
1666 | hpp->latency_timer); | |
1667 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); | |
1668 | if (hpp->enable_serr) | |
1669 | pci_bctl |= PCI_BRIDGE_CTL_SERR; | |
589fcc23 BH |
1670 | if (hpp->enable_perr) |
1671 | pci_bctl |= PCI_BRIDGE_CTL_PARITY; | |
589fcc23 BH |
1672 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); |
1673 | } | |
1674 | } | |
1675 | ||
1676 | static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) | |
1677 | { | |
977509f7 BH |
1678 | int pos; |
1679 | ||
1680 | if (!hpp) | |
1681 | return; | |
1682 | ||
1683 | pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); | |
1684 | if (!pos) | |
1685 | return; | |
1686 | ||
1687 | dev_warn(&dev->dev, "PCI-X settings not supported\n"); | |
589fcc23 BH |
1688 | } |
1689 | ||
e42010d8 JT |
1690 | static bool pcie_root_rcb_set(struct pci_dev *dev) |
1691 | { | |
1692 | struct pci_dev *rp = pcie_find_root_port(dev); | |
1693 | u16 lnkctl; | |
1694 | ||
1695 | if (!rp) | |
1696 | return false; | |
1697 | ||
1698 | pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); | |
1699 | if (lnkctl & PCI_EXP_LNKCTL_RCB) | |
1700 | return true; | |
1701 | ||
1702 | return false; | |
1703 | } | |
1704 | ||
589fcc23 BH |
1705 | static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) |
1706 | { | |
1707 | int pos; | |
1708 | u32 reg32; | |
1709 | ||
1710 | if (!hpp) | |
1711 | return; | |
1712 | ||
977509f7 BH |
1713 | if (!pci_is_pcie(dev)) |
1714 | return; | |
1715 | ||
589fcc23 BH |
1716 | if (hpp->revision > 1) { |
1717 | dev_warn(&dev->dev, "PCIe settings rev %d not supported\n", | |
1718 | hpp->revision); | |
1719 | return; | |
1720 | } | |
1721 | ||
302328c0 BH |
1722 | /* |
1723 | * Don't allow _HPX to change MPS or MRRS settings. We manage | |
1724 | * those to make sure they're consistent with the rest of the | |
1725 | * platform. | |
1726 | */ | |
1727 | hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD | | |
1728 | PCI_EXP_DEVCTL_READRQ; | |
1729 | hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD | | |
1730 | PCI_EXP_DEVCTL_READRQ); | |
1731 | ||
589fcc23 BH |
1732 | /* Initialize Device Control Register */ |
1733 | pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, | |
1734 | ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); | |
1735 | ||
1736 | /* Initialize Link Control Register */ | |
e42010d8 JT |
1737 | if (pcie_cap_has_lnkctl(dev)) { |
1738 | ||
1739 | /* | |
1740 | * If the Root Port supports Read Completion Boundary of | |
1741 | * 128, set RCB to 128. Otherwise, clear it. | |
1742 | */ | |
1743 | hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; | |
1744 | hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; | |
1745 | if (pcie_root_rcb_set(dev)) | |
1746 | hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; | |
1747 | ||
589fcc23 BH |
1748 | pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, |
1749 | ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); | |
e42010d8 | 1750 | } |
589fcc23 BH |
1751 | |
1752 | /* Find Advanced Error Reporting Enhanced Capability */ | |
1753 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | |
1754 | if (!pos) | |
1755 | return; | |
1756 | ||
1757 | /* Initialize Uncorrectable Error Mask Register */ | |
1758 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32); | |
1759 | reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or; | |
1760 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); | |
1761 | ||
1762 | /* Initialize Uncorrectable Error Severity Register */ | |
1763 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32); | |
1764 | reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or; | |
1765 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); | |
1766 | ||
1767 | /* Initialize Correctable Error Mask Register */ | |
1768 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32); | |
1769 | reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or; | |
1770 | pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); | |
1771 | ||
1772 | /* Initialize Advanced Error Capabilities and Control Register */ | |
1773 | pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); | |
1774 | reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; | |
675734ba BH |
1775 | /* Don't enable ECRC generation or checking if unsupported */ |
1776 | if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) | |
1777 | reg32 &= ~PCI_ERR_CAP_ECRC_GENE; | |
1778 | if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) | |
1779 | reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; | |
589fcc23 BH |
1780 | pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); |
1781 | ||
1782 | /* | |
1783 | * FIXME: The following two registers are not supported yet. | |
1784 | * | |
1785 | * o Secondary Uncorrectable Error Severity Register | |
1786 | * o Secondary Uncorrectable Error Mask Register | |
1787 | */ | |
1788 | } | |
1789 | ||
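/*
 * Illustrative sketch, not part of this file: every _HPX record applied
 * above follows the same "AND mask, then OR mask" read-modify-write pattern
 * on a config dword; example_rmw_dword() is an assumed helper name.
 */
static void example_rmw_dword(struct pci_dev *pdev, int where,
			      u32 and_mask, u32 or_mask)
{
	u32 val;

	pci_read_config_dword(pdev, where, &val);
	val = (val & and_mask) | or_mask;
	pci_write_config_dword(pdev, where, val);
}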
62ce94a7 | 1790 | int pci_configure_extended_tags(struct pci_dev *dev, void *ign) |
60db3a4d | 1791 | { |
62ce94a7 SK |
1792 | struct pci_host_bridge *host; |
1793 | u32 cap; | |
1794 | u16 ctl; | |
60db3a4d SK |
1795 | int ret; |
1796 | ||
1797 | if (!pci_is_pcie(dev)) | |
62ce94a7 | 1798 | return 0; |
60db3a4d | 1799 | |
62ce94a7 | 1800 | ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); |
60db3a4d | 1801 | if (ret) |
62ce94a7 SK |
1802 | return 0; |
1803 | ||
1804 | if (!(cap & PCI_EXP_DEVCAP_EXT_TAG)) | |
1805 | return 0; | |
60db3a4d | 1806 | |
62ce94a7 SK |
1807 | ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); |
1808 | if (ret) | |
1809 | return 0; | |
1810 | ||
1811 | host = pci_find_host_bridge(dev->bus); | |
1812 | if (!host) | |
1813 | return 0; | |
60db3a4d | 1814 | |
62ce94a7 SK |
1815 | /* |
1816 | * If some device in the hierarchy doesn't handle Extended Tags | |
1817 | * correctly, make sure they're disabled. | |
1818 | */ | |
1819 | if (host->no_ext_tags) { | |
1820 | if (ctl & PCI_EXP_DEVCTL_EXT_TAG) { | |
1821 | dev_info(&dev->dev, "disabling Extended Tags\n"); | |
1822 | pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, | |
1823 | PCI_EXP_DEVCTL_EXT_TAG); | |
1824 | } | |
1825 | return 0; | |
1826 | } | |
1827 | ||
1828 | if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) { | |
1829 | dev_info(&dev->dev, "enabling Extended Tags\n"); | |
60db3a4d SK |
1830 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL, |
1831 | PCI_EXP_DEVCTL_EXT_TAG); | |
62ce94a7 SK |
1832 | } |
1833 | return 0; | |
60db3a4d SK |
1834 | } |
1835 | ||
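/*
 * Illustrative sketch, not part of this file: checking whether a device
 * currently has Extended (8-bit) Tags enabled uses the same capability
 * accessors as above; example_ext_tags_enabled() is an assumed name.
 */
static bool example_ext_tags_enabled(struct pci_dev *pdev)
{
	u16 ctl;

	if (pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl))
		return false;

	return ctl & PCI_EXP_DEVCTL_EXT_TAG;
}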
a99b646a | 1836 | /** |
1837 | * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable | |
1838 | * @dev: PCI device to query | |
1839 | * | |
1840 | * Returns true if the device has enabled relaxed ordering attribute. | |
1841 | */ | |
1842 | bool pcie_relaxed_ordering_enabled(struct pci_dev *dev) | |
1843 | { | |
1844 | u16 v; | |
1845 | ||
1846 | pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v); | |
1847 | ||
1848 | return !!(v & PCI_EXP_DEVCTL_RELAX_EN); | |
1849 | } | |
1850 | EXPORT_SYMBOL(pcie_relaxed_ordering_enabled); | |
1851 | ||
1852 | static void pci_configure_relaxed_ordering(struct pci_dev *dev) | |
1853 | { | |
1854 | struct pci_dev *root; | |
1855 | ||
1856 | /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */ | |
1857 | if (dev->is_virtfn) | |
1858 | return; | |
1859 | ||
1860 | if (!pcie_relaxed_ordering_enabled(dev)) | |
1861 | return; | |
1862 | ||
1863 | /* | |
1864 | * For now, we only deal with Relaxed Ordering issues with Root | |
1865 | * Ports. Peer-to-Peer DMA is another can of worms. | |
1866 | */ | |
1867 | root = pci_find_pcie_root_port(dev); | |
1868 | if (!root) | |
1869 | return; | |
1870 | ||
1871 | if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) { | |
1872 | pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, | |
1873 | PCI_EXP_DEVCTL_RELAX_EN); | |
1874 | dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n"); | |
1875 | } | |
1876 | } | |
1877 | ||
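/*
 * Illustrative sketch, not part of this file: a hypothetical driver that
 * tunes its DMA engine based on Relaxed Ordering would key off the helper
 * exported above, after the fixup in pci_configure_relaxed_ordering() has
 * run; example_can_use_relaxed_ordering() is an assumed name.
 */
static bool example_can_use_relaxed_ordering(struct pci_dev *pdev)
{
	return pcie_relaxed_ordering_enabled(pdev);
}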
6cd33649 BH |
1878 | static void pci_configure_device(struct pci_dev *dev) |
1879 | { | |
1880 | struct hotplug_params hpp; | |
1881 | int ret; | |
1882 | ||
9dae3a97 | 1883 | pci_configure_mps(dev); |
62ce94a7 | 1884 | pci_configure_extended_tags(dev, NULL); |
a99b646a | 1885 | pci_configure_relaxed_ordering(dev); |
9dae3a97 | 1886 | |
6cd33649 BH |
1887 | memset(&hpp, 0, sizeof(hpp)); |
1888 | ret = pci_get_hp_params(dev, &hpp); | |
1889 | if (ret) | |
1890 | return; | |
1891 | ||
1892 | program_hpp_type2(dev, hpp.t2); | |
1893 | program_hpp_type1(dev, hpp.t1); | |
1894 | program_hpp_type0(dev, hpp.t0); | |
1895 | } | |
1896 | ||
201de56e ZY |
1897 | static void pci_release_capabilities(struct pci_dev *dev) |
1898 | { | |
1899 | pci_vpd_release(dev); | |
d1b054da | 1900 | pci_iov_release(dev); |
f796841e | 1901 | pci_free_cap_save_buffers(dev); |
201de56e ZY |
1902 | } |
1903 | ||
1da177e4 LT |
1904 | /** |
1905 | * pci_release_dev - free a pci device structure when all users of it are finished. | |
1906 | * @dev: device that's been disconnected | |
1907 | * | |
1908 | * Will be called only by the device core when all users of this pci device are | |
1909 | * done. | |
1910 | */ | |
1911 | static void pci_release_dev(struct device *dev) | |
1912 | { | |
04480094 | 1913 | struct pci_dev *pci_dev; |
1da177e4 | 1914 | |
04480094 | 1915 | pci_dev = to_pci_dev(dev); |
201de56e | 1916 | pci_release_capabilities(pci_dev); |
98d9f30c | 1917 | pci_release_of_node(pci_dev); |
6ae32c53 | 1918 | pcibios_release_device(pci_dev); |
8b1fce04 | 1919 | pci_bus_put(pci_dev->bus); |
782a985d | 1920 | kfree(pci_dev->driver_override); |
338c3149 | 1921 | kfree(pci_dev->dma_alias_mask); |
1da177e4 LT |
1922 | kfree(pci_dev); |
1923 | } | |
1924 | ||
3c6e6ae7 | 1925 | struct pci_dev *pci_alloc_dev(struct pci_bus *bus) |
65891215 ME |
1926 | { |
1927 | struct pci_dev *dev; | |
1928 | ||
1929 | dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); | |
1930 | if (!dev) | |
1931 | return NULL; | |
1932 | ||
65891215 | 1933 | INIT_LIST_HEAD(&dev->bus_list); |
88e7b167 | 1934 | dev->dev.type = &pci_dev_type; |
3c6e6ae7 | 1935 | dev->bus = pci_bus_get(bus); |
65891215 ME |
1936 | |
1937 | return dev; | |
1938 | } | |
3c6e6ae7 GZ |
1939 | EXPORT_SYMBOL(pci_alloc_dev); |
1940 | ||
62bc6a6f SK |
1941 | static bool pci_bus_crs_vendor_id(u32 l) |
1942 | { | |
1943 | return (l & 0xffff) == 0x0001; | |
1944 | } | |
1945 | ||
6a802ef0 SK |
1946 | static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l, |
1947 | int timeout) | |
1da177e4 | 1948 | { |
1da177e4 LT |
1949 | int delay = 1; |
1950 | ||
6a802ef0 SK |
1951 | if (!pci_bus_crs_vendor_id(*l)) |
1952 | return true; /* not a CRS completion */ | |
1da177e4 | 1953 | |
6a802ef0 SK |
1954 | if (!timeout) |
1955 | return false; /* CRS, but caller doesn't want to wait */ | |
1da177e4 | 1956 | |
89665a6a | 1957 | /* |
6a802ef0 SK |
1958 | * We got the reserved Vendor ID that indicates a completion with |
1959 | * Configuration Request Retry Status (CRS). Retry until we get a | |
1960 | * valid Vendor ID or we time out. | |
89665a6a | 1961 | */ |
62bc6a6f | 1962 | while (pci_bus_crs_vendor_id(*l)) { |
6a802ef0 | 1963 | if (delay > timeout) { |
e78e661f SK |
1964 | pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n", |
1965 | pci_domain_nr(bus), bus->number, | |
1966 | PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); | |
1967 | ||
efdc87da | 1968 | return false; |
1da177e4 | 1969 | } |
e78e661f SK |
1970 | if (delay >= 1000) |
1971 | pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n", | |
1972 | pci_domain_nr(bus), bus->number, | |
1973 | PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); | |
efdc87da | 1974 | |
1da177e4 LT |
1975 | msleep(delay); |
1976 | delay *= 2; | |
9f982756 | 1977 | |
efdc87da YL |
1978 | if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) |
1979 | return false; | |
1da177e4 LT |
1980 | } |
1981 | ||
e78e661f SK |
1982 | if (delay >= 1000) |
1983 | pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n", | |
1984 | pci_domain_nr(bus), bus->number, | |
1985 | PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); | |
1986 | ||
efdc87da YL |
1987 | return true; |
1988 | } | |
6a802ef0 SK |
1989 | |
1990 | bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, | |
1991 | int timeout) | |
1992 | { | |
1993 | if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) | |
1994 | return false; | |
1995 | ||
1996 | /* some broken boards return 0 or ~0 if a slot is empty: */ | |
1997 | if (*l == 0xffffffff || *l == 0x00000000 || | |
1998 | *l == 0x0000ffff || *l == 0xffff0000) | |
1999 | return false; | |
2000 | ||
2001 | if (pci_bus_crs_vendor_id(*l)) | |
2002 | return pci_bus_wait_crs(bus, devfn, l, timeout); | |
2003 | ||
efdc87da YL |
2004 | return true; |
2005 | } | |
2006 | EXPORT_SYMBOL(pci_bus_read_dev_vendor_id); | |
2007 | ||
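/*
 * Illustrative sketch, not part of this file: a hypothetical caller probing
 * a single function would use the exported helper with a CRS timeout in
 * milliseconds, just as pci_scan_device() below passes 60*1000;
 * example_device_present() is an assumed name.
 */
static bool example_device_present(struct pci_bus *bus, int devfn)
{
	u32 id;

	return pci_bus_read_dev_vendor_id(bus, devfn, &id, 1000 /* ms */);
}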
2008 | /* | |
2009 | * Read the config data for a PCI device, sanity-check it | |
2010 | * and fill in the dev structure... | |
2011 | */ | |
2012 | static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) | |
2013 | { | |
2014 | struct pci_dev *dev; | |
2015 | u32 l; | |
2016 | ||
2017 | if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000)) | |
2018 | return NULL; | |
2019 | ||
8b1fce04 | 2020 | dev = pci_alloc_dev(bus); |
1da177e4 LT |
2021 | if (!dev) |
2022 | return NULL; | |
2023 | ||
1da177e4 | 2024 | dev->devfn = devfn; |
1da177e4 LT |
2025 | dev->vendor = l & 0xffff; |
2026 | dev->device = (l >> 16) & 0xffff; | |
cef354db | 2027 | |
98d9f30c BH |
2028 | pci_set_of_node(dev); |
2029 | ||
480b93b7 | 2030 | if (pci_setup_device(dev)) { |
8b1fce04 | 2031 | pci_bus_put(dev->bus); |
1da177e4 LT |
2032 | kfree(dev); |
2033 | return NULL; | |
2034 | } | |
1da177e4 LT |
2035 | |
2036 | return dev; | |
2037 | } | |
2038 | ||
201de56e ZY |
2039 | static void pci_init_capabilities(struct pci_dev *dev) |
2040 | { | |
938174e5 SS |
2041 | /* Enhanced Allocation */ |
2042 | pci_ea_init(dev); | |
2043 | ||
e80e7edc GP |
2044 | /* Setup MSI caps & disable MSI/MSI-X interrupts */ |
2045 | pci_msi_setup_pci_dev(dev); | |
201de56e | 2046 | |
63f4898a RW |
2047 | /* Buffers for saving PCIe and PCI-X capabilities */ |
2048 | pci_allocate_cap_save_buffers(dev); | |
2049 | ||
201de56e ZY |
2050 | /* Power Management */ |
2051 | pci_pm_init(dev); | |
2052 | ||
2053 | /* Vital Product Data */ | |
f1cd93f9 | 2054 | pci_vpd_init(dev); |
58c3a727 YZ |
2055 | |
2056 | /* Alternative Routing-ID Forwarding */ | |
31ab2476 | 2057 | pci_configure_ari(dev); |
d1b054da YZ |
2058 | |
2059 | /* Single Root I/O Virtualization */ | |
2060 | pci_iov_init(dev); | |
ae21ee65 | 2061 | |
edc90fee BH |
2062 | /* Address Translation Services */ |
2063 | pci_ats_init(dev); | |
2064 | ||
ae21ee65 | 2065 | /* Enable ACS P2P upstream forwarding */ |
5d990b62 | 2066 | pci_enable_acs(dev); |
b07461a8 | 2067 | |
9bb04a0c JY |
2068 | /* Precision Time Measurement */ |
2069 | pci_ptm_init(dev); | |
4dc2db09 | 2070 | |
66b80809 KB |
2071 | /* Advanced Error Reporting */ |
2072 | pci_aer_init(dev); | |
201de56e ZY |
2073 | } |
2074 | ||
098259eb MZ |
2075 | /* |
2076 | * This is the equivalent of pci_host_bridge_msi_domain that acts on | |
2077 | * devices. Firmware interfaces that can select the MSI domain on a | |
2078 | * per-device basis should be called from here. | |
2079 | */ | |
2080 | static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev) | |
2081 | { | |
2082 | struct irq_domain *d; | |
2083 | ||
2084 | /* | |
2085 | * If a domain has been set through the pcibios_add_device | |
2086 | * callback, then this is the one (platform code knows best). | |
2087 | */ | |
2088 | d = dev_get_msi_domain(&dev->dev); | |
2089 | if (d) | |
2090 | return d; | |
2091 | ||
54fa97ee MZ |
2092 | /* |
2093 | * Let's see if we have a firmware interface able to provide | |
2094 | * the domain. | |
2095 | */ | |
2096 | d = pci_msi_get_device_domain(dev); | |
2097 | if (d) | |
2098 | return d; | |
2099 | ||
098259eb MZ |
2100 | return NULL; |
2101 | } | |
2102 | ||
44aa0c65 MZ |
2103 | static void pci_set_msi_domain(struct pci_dev *dev) |
2104 | { | |
098259eb MZ |
2105 | struct irq_domain *d; |
2106 | ||
44aa0c65 | 2107 | /* |
098259eb MZ |
2108 | * If the platform or firmware interfaces cannot supply a |
2109 | * device-specific MSI domain, then inherit the default domain | |
2110 | * from the host bridge itself. | |
44aa0c65 | 2111 | */ |
098259eb MZ |
2112 | d = pci_dev_msi_domain(dev); |
2113 | if (!d) | |
2114 | d = dev_get_msi_domain(&dev->bus->dev); | |
2115 | ||
2116 | dev_set_msi_domain(&dev->dev, d); | |
44aa0c65 MZ |
2117 | } |
2118 | ||
96bde06a | 2119 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) |
1da177e4 | 2120 | { |
4f535093 YL |
2121 | int ret; |
2122 | ||
6cd33649 BH |
2123 | pci_configure_device(dev); |
2124 | ||
cdb9b9f7 PM |
2125 | device_initialize(&dev->dev); |
2126 | dev->dev.release = pci_release_dev; | |
1da177e4 | 2127 | |
7629d19a | 2128 | set_dev_node(&dev->dev, pcibus_to_node(bus)); |
cdb9b9f7 | 2129 | dev->dev.dma_mask = &dev->dma_mask; |
4d57cdfa | 2130 | dev->dev.dma_parms = &dev->dma_parms; |
cdb9b9f7 | 2131 | dev->dev.coherent_dma_mask = 0xffffffffull; |
1da177e4 | 2132 | |
4d57cdfa | 2133 | pci_set_dma_max_seg_size(dev, 65536); |
59fc67de | 2134 | pci_set_dma_seg_boundary(dev, 0xffffffff); |
4d57cdfa | 2135 | |
1da177e4 LT |
2136 | /* Fix up broken headers */ |
2137 | pci_fixup_device(pci_fixup_header, dev); | |
2138 | ||
2069ecfb YL |
2139 | /* moved out from quirk header fixup code */ |
2140 | pci_reassigndev_resource_alignment(dev); | |
2141 | ||
4b77b0a2 RW |
2142 | /* Clear the state_saved flag. */ |
2143 | dev->state_saved = false; | |
2144 | ||
201de56e ZY |
2145 | /* Initialize various capabilities */ |
2146 | pci_init_capabilities(dev); | |
eb9d0fe4 | 2147 | |
1da177e4 LT |
2148 | /* |
2149 | * Add the device to our list of discovered devices | |
2150 | * and the bus list for fixup functions, etc. | |
2151 | */ | |
d71374da | 2152 | down_write(&pci_bus_sem); |
1da177e4 | 2153 | list_add_tail(&dev->bus_list, &bus->devices); |
d71374da | 2154 | up_write(&pci_bus_sem); |
4f535093 | 2155 | |
4f535093 YL |
2156 | ret = pcibios_add_device(dev); |
2157 | WARN_ON(ret < 0); | |
2158 | ||
44aa0c65 MZ |
2159 | /* Setup MSI irq domain */ |
2160 | pci_set_msi_domain(dev); | |
2161 | ||
4f535093 YL |
2162 | /* Notifier could use PCI capabilities */ |
2163 | dev->match_driver = false; | |
2164 | ret = device_add(&dev->dev); | |
2165 | WARN_ON(ret < 0); | |
cdb9b9f7 PM |
2166 | } |
2167 | ||
10874f5a | 2168 | struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn) |
cdb9b9f7 PM |
2169 | { |
2170 | struct pci_dev *dev; | |
2171 | ||
90bdb311 TP |
2172 | dev = pci_get_slot(bus, devfn); |
2173 | if (dev) { | |
2174 | pci_dev_put(dev); | |
2175 | return dev; | |
2176 | } | |
2177 | ||
cdb9b9f7 PM |
2178 | dev = pci_scan_device(bus, devfn); |
2179 | if (!dev) | |
2180 | return NULL; | |
2181 | ||
2182 | pci_device_add(dev, bus); | |
1da177e4 LT |
2183 | |
2184 | return dev; | |
2185 | } | |
b73e9687 | 2186 | EXPORT_SYMBOL(pci_scan_single_device); |
1da177e4 | 2187 | |
b1bd58e4 | 2188 | static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn) |
f07852d6 | 2189 | { |
b1bd58e4 YW |
2190 | int pos; |
2191 | u16 cap = 0; | |
2192 | unsigned next_fn; | |
4fb88c1a | 2193 | |
b1bd58e4 YW |
2194 | if (pci_ari_enabled(bus)) { |
2195 | if (!dev) | |
2196 | return 0; | |
2197 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); | |
2198 | if (!pos) | |
2199 | return 0; | |
4fb88c1a | 2200 | |
b1bd58e4 YW |
2201 | pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap); |
2202 | next_fn = PCI_ARI_CAP_NFN(cap); | |
2203 | if (next_fn <= fn) | |
2204 | return 0; /* protect against malformed list */ | |
f07852d6 | 2205 | |
b1bd58e4 YW |
2206 | return next_fn; |
2207 | } | |
2208 | ||
2209 | /* dev may be NULL for non-contiguous multifunction devices */ | |
2210 | if (!dev || dev->multifunction) | |
2211 | return (fn + 1) % 8; | |
f07852d6 | 2212 | |
f07852d6 MW |
2213 | return 0; |
2214 | } | |
2215 | ||
2216 | static int only_one_child(struct pci_bus *bus) | |
2217 | { | |
2218 | struct pci_dev *parent = bus->self; | |
284f5f9d | 2219 | |
f07852d6 MW |
2220 | if (!parent || !pci_is_pcie(parent)) |
2221 | return 0; | |
62f87c0e | 2222 | if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT) |
284f5f9d | 2223 | return 1; |
5bbe029f BH |
2224 | |
2225 | /* | |
2226 | * PCIe downstream ports are bridges that normally lead to only a | |
2227 | * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all | |
2228 | * possible devices, not just device 0. See PCIe spec r3.0, | |
2229 | * sec 7.3.1. | |
2230 | */ | |
777e61ea | 2231 | if (parent->has_secondary_link && |
284f5f9d | 2232 | !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS)) |
f07852d6 MW |
2233 | return 1; |
2234 | return 0; | |
2235 | } | |
2236 | ||
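/*
 * Illustrative sketch, not part of this file: the "next function" linkage
 * that next_fn() walks above comes straight from the ARI capability;
 * example_ari_next_fn() is an assumed name.
 */
static u8 example_ari_next_fn(struct pci_dev *pdev)
{
	u16 cap;
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ARI);

	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ARI_CAP, &cap);
	return PCI_ARI_CAP_NFN(cap);
}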
1da177e4 LT |
2237 | /** |
2238 | * pci_scan_slot - scan a PCI slot on a bus for devices. | |
2239 | * @bus: PCI bus to scan | |
2240 | * @devfn: slot number to scan (must have zero function.) | |
2241 | * | |
2242 | * Scan a PCI slot on the specified PCI bus for devices, adding | |
2243 | * discovered devices to the @bus->devices list. New devices | |
8a1bc901 | 2244 | * will not have is_added set. |
1b69dfc6 TP |
2245 | * |
2246 | * Returns the number of new devices found. | |
1da177e4 | 2247 | */ |
96bde06a | 2248 | int pci_scan_slot(struct pci_bus *bus, int devfn) |
1da177e4 | 2249 | { |
f07852d6 | 2250 | unsigned fn, nr = 0; |
1b69dfc6 | 2251 | struct pci_dev *dev; |
f07852d6 MW |
2252 | |
2253 | if (only_one_child(bus) && (devfn > 0)) | |
2254 | return 0; /* Already scanned the entire slot */ | |
1da177e4 | 2255 | |
1b69dfc6 | 2256 | dev = pci_scan_single_device(bus, devfn); |
4fb88c1a MW |
2257 | if (!dev) |
2258 | return 0; | |
2259 | if (!dev->is_added) | |
1b69dfc6 TP |
2260 | nr++; |
2261 | ||
b1bd58e4 | 2262 | for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) { |
f07852d6 MW |
2263 | dev = pci_scan_single_device(bus, devfn + fn); |
2264 | if (dev) { | |
2265 | if (!dev->is_added) | |
2266 | nr++; | |
2267 | dev->multifunction = 1; | |
1da177e4 LT |
2268 | } |
2269 | } | |
7d715a6c | 2270 | |
149e1637 SL |
2271 | /* Only one slot has a PCIe device */ | 
2272 | if (bus->self && nr) | |
7d715a6c SL |
2273 | pcie_aspm_init_link_state(bus->self); |
2274 | ||
1da177e4 LT |
2275 | return nr; |
2276 | } | |
b7fe9434 | 2277 | EXPORT_SYMBOL(pci_scan_slot); |
1da177e4 | 2278 | |
b03e7495 JM |
2279 | static int pcie_find_smpss(struct pci_dev *dev, void *data) |
2280 | { | |
2281 | u8 *smpss = data; | |
2282 | ||
2283 | if (!pci_is_pcie(dev)) | |
2284 | return 0; | |
2285 | ||
d4aa68f6 YW |
2286 | /* |
2287 | * We don't have a way to change MPS settings on devices that have | |
2288 | * drivers attached. A hot-added device might support only the minimum | |
2289 | * MPS setting (MPS=128). Therefore, if the fabric contains a bridge | |
2290 | * where devices may be hot-added, we limit the fabric MPS to 128 so | |
2291 | * hot-added devices will work correctly. | |
2292 | * | |
2293 | * However, if we hot-add a device to a slot directly below a Root | |
2294 | * Port, it's impossible for there to be other existing devices below | |
2295 | * the port. We don't limit the MPS in this case because we can | |
2296 | * reconfigure MPS on both the Root Port and the hot-added device, | |
2297 | * and there are no other devices involved. | |
2298 | * | |
2299 | * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA. | |
b03e7495 | 2300 | */ |
d4aa68f6 YW |
2301 | if (dev->is_hotplug_bridge && |
2302 | pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) | |
b03e7495 JM |
2303 | *smpss = 0; |
2304 | ||
2305 | if (*smpss > dev->pcie_mpss) | |
2306 | *smpss = dev->pcie_mpss; | |
2307 | ||
2308 | return 0; | |
2309 | } | |
2310 | ||
2311 | static void pcie_write_mps(struct pci_dev *dev, int mps) | |
2312 | { | |
62f392ea | 2313 | int rc; |
b03e7495 JM |
2314 | |
2315 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { | |
62f392ea | 2316 | mps = 128 << dev->pcie_mpss; |
b03e7495 | 2317 | |
62f87c0e YW |
2318 | if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT && |
2319 | dev->bus->self) | |
62f392ea | 2320 | /* For "Performance", the assumption is made that |
b03e7495 JM |
2321 | * downstream communication will never be larger than |
2322 | * the MRRS. So, the MPS only needs to be configured | |
2323 | * for the upstream communication. This being the case, | |
2324 | * walk from the top down and set the MPS of the child | |
2325 | * to that of the parent bus. | |
62f392ea JM |
2326 | * |
2327 | * Configure the device MPS with the smaller of the | |
2328 | * device MPSS or the bridge MPS (which is assumed to be | |
2329 | * properly configured at this point to the largest | |
2330 | * allowable MPS based on its parent bus). | |
b03e7495 | 2331 | */ |
62f392ea | 2332 | mps = min(mps, pcie_get_mps(dev->bus->self)); |
b03e7495 JM |
2333 | } |
2334 | ||
2335 | rc = pcie_set_mps(dev, mps); | |
2336 | if (rc) | |
2337 | dev_err(&dev->dev, "Failed attempting to set the MPS\n"); | |
2338 | } | |
2339 | ||
62f392ea | 2340 | static void pcie_write_mrrs(struct pci_dev *dev) |
b03e7495 | 2341 | { |
62f392ea | 2342 | int rc, mrrs; |
b03e7495 | 2343 | |
ed2888e9 JM |
2344 | /* In the "safe" case, do not configure the MRRS. There appear to be |
2345 | * issues with setting MRRS to 0 on a number of devices. | |
2346 | */ | |
ed2888e9 JM |
2347 | if (pcie_bus_config != PCIE_BUS_PERFORMANCE) |
2348 | return; | |
2349 | ||
ed2888e9 JM |
2350 | /* For Max performance, the MRRS must be set to the largest supported |
2351 | * value. However, it cannot be configured larger than the MPS the | |
62f392ea JM |
2352 | * device or the bus can support. This should already be properly |
2353 | * configured by a prior call to pcie_write_mps. | |
ed2888e9 | 2354 | */ |
62f392ea | 2355 | mrrs = pcie_get_mps(dev); |
b03e7495 JM |
2356 | |
2357 | /* MRRS is an R/W register. Invalid values can be written, but a | 
ed2888e9 | 2358 | * subsequent read will verify if the value is acceptable or not. |
b03e7495 JM |
2359 | * If the MRRS value provided is not acceptable (e.g., too large), |
2360 | * shrink the value until it is acceptable to the HW. | |
f7625980 | 2361 | */ |
b03e7495 JM |
2362 | while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { |
2363 | rc = pcie_set_readrq(dev, mrrs); | |
62f392ea JM |
2364 | if (!rc) |
2365 | break; | |
b03e7495 | 2366 | |
62f392ea | 2367 | dev_warn(&dev->dev, "Failed attempting to set the MRRS\n"); |
b03e7495 JM |
2368 | mrrs /= 2; |
2369 | } | |
62f392ea JM |
2370 | |
2371 | if (mrrs < 128) | |
227f0647 | 2372 | dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n"); |
b03e7495 JM |
2373 | } |
2374 | ||
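/*
 * Illustrative sketch, not part of this file: drivers use the same
 * pcie_get_readrq()/pcie_set_readrq() pair to tune MRRS, e.g. to cap a
 * hypothetical device at 512 bytes; example_cap_readrq() is an assumed name.
 */
static void example_cap_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) > 512)
		pcie_set_readrq(pdev, 512);
}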
2375 | static int pcie_bus_configure_set(struct pci_dev *dev, void *data) | |
2376 | { | |
a513a99a | 2377 | int mps, orig_mps; |
b03e7495 JM |
2378 | |
2379 | if (!pci_is_pcie(dev)) | |
2380 | return 0; | |
2381 | ||
27d868b5 KB |
2382 | if (pcie_bus_config == PCIE_BUS_TUNE_OFF || |
2383 | pcie_bus_config == PCIE_BUS_DEFAULT) | |
5895af79 | 2384 | return 0; |
5895af79 | 2385 | |
a513a99a JM |
2386 | mps = 128 << *(u8 *)data; |
2387 | orig_mps = pcie_get_mps(dev); | |
b03e7495 JM |
2388 | |
2389 | pcie_write_mps(dev, mps); | |
62f392ea | 2390 | pcie_write_mrrs(dev); |
b03e7495 | 2391 | |
227f0647 RD |
2392 | dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n", |
2393 | pcie_get_mps(dev), 128 << dev->pcie_mpss, | |
a513a99a | 2394 | orig_mps, pcie_get_readrq(dev)); |
b03e7495 JM |
2395 | |
2396 | return 0; | |
2397 | } | |
2398 | ||
a513a99a | 2399 | /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down, |
b03e7495 JM |
2400 | * parents then children fashion. If this changes, then this code will not |
2401 | * work as designed. | |
2402 | */ | |
a58674ff | 2403 | void pcie_bus_configure_settings(struct pci_bus *bus) |
b03e7495 | 2404 | { |
1e358f94 | 2405 | u8 smpss = 0; |
b03e7495 | 2406 | |
a58674ff | 2407 | if (!bus->self) |
b03e7495 JM |
2408 | return; |
2409 | ||
b03e7495 | 2410 | if (!pci_is_pcie(bus->self)) |
5f39e670 JM |
2411 | return; |
2412 | ||
2413 | /* FIXME - Peer to peer DMA is possible, though the endpoint would need | |
3315472c | 2414 | * to be aware of the MPS of the destination. To work around this, |
5f39e670 JM |
2415 | * simply force the MPS of the entire system to the smallest possible. |
2416 | */ | |
2417 | if (pcie_bus_config == PCIE_BUS_PEER2PEER) | |
2418 | smpss = 0; | |
2419 | ||
b03e7495 | 2420 | if (pcie_bus_config == PCIE_BUS_SAFE) { |
a58674ff | 2421 | smpss = bus->self->pcie_mpss; |
5f39e670 | 2422 | |
b03e7495 JM |
2423 | pcie_find_smpss(bus->self, &smpss); |
2424 | pci_walk_bus(bus, pcie_find_smpss, &smpss); | |
2425 | } | |
2426 | ||
2427 | pcie_bus_configure_set(bus->self, &smpss); | |
2428 | pci_walk_bus(bus, pcie_bus_configure_set, &smpss); | |
2429 | } | |
debc3b77 | 2430 | EXPORT_SYMBOL_GPL(pcie_bus_configure_settings); |
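/*
 * Illustrative sketch, not part of this file: host controller drivers
 * typically call the function exported above for every child bus once
 * scanning is complete; example_configure_buses() is an assumed name.
 */
static void example_configure_buses(struct pci_bus *root)
{
	struct pci_bus *child;

	list_for_each_entry(child, &root->children, node)
		pcie_bus_configure_settings(child);
}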
b03e7495 | 2431 | |
bccf90d6 PD |
2432 | /* |
2433 | * Called after each bus is probed, but before its children are examined. This | |
2434 | * is marked as __weak because multiple architectures define it. | |
2435 | */ | |
2436 | void __weak pcibios_fixup_bus(struct pci_bus *bus) | |
2437 | { | |
2438 | /* nothing to do, expected to be removed in the future */ | |
2439 | } | |
2440 | ||
1c02ea81 MW |
2441 | /** |
2442 | * pci_scan_child_bus_extend() - Scan devices below a bus | |
2443 | * @bus: Bus to scan for devices | |
2444 | * @available_buses: Total number of buses available (%0 does not try to | |
2445 | * extend beyond the minimal) | |
2446 | * | |
2447 | * Scans devices below @bus including subordinate buses. Returns new | |
2448 | * subordinate number including all the found devices. Passing | |
2449 | * @available_buses causes the remaining bus space to be distributed | |
2450 | * equally between hotplug-capable bridges to allow future extension of the | |
2451 | * hierarchy. | |
2452 | */ | |
2453 | static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus, | |
2454 | unsigned int available_buses) | |
1da177e4 | 2455 | { |
1c02ea81 MW |
2456 | unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0; |
2457 | unsigned int start = bus->busn_res.start; | |
2458 | unsigned int devfn, cmax, max = start; | |
1da177e4 LT |
2459 | struct pci_dev *dev; |
2460 | ||
0207c356 | 2461 | dev_dbg(&bus->dev, "scanning bus\n"); |
1da177e4 LT |
2462 | |
2463 | /* Go find them, Rover! */ | |
2464 | for (devfn = 0; devfn < 0x100; devfn += 8) | |
2465 | pci_scan_slot(bus, devfn); | |
2466 | ||
a28724b0 | 2467 | /* Reserve buses for SR-IOV capability. */ |
1c02ea81 MW |
2468 | used_buses = pci_iov_bus_range(bus); |
2469 | max += used_buses; | |
a28724b0 | 2470 | |
1da177e4 LT |
2471 | /* |
2472 | * After performing arch-dependent fixup of the bus, look behind | |
2473 | * all PCI-to-PCI bridges on this bus. | |
2474 | */ | |
74710ded | 2475 | if (!bus->is_added) { |
0207c356 | 2476 | dev_dbg(&bus->dev, "fixups for bus\n"); |
74710ded | 2477 | pcibios_fixup_bus(bus); |
981cf9ea | 2478 | bus->is_added = 1; |
74710ded AC |
2479 | } |
2480 | ||
1c02ea81 MW |
2481 | /* |
2482 | * Calculate how many hotplug bridges and normal bridges there | |
2483 | * are on this bus. We will distribute the additional available | |
2484 | * buses between hotplug bridges. | |
2485 | */ | |
2486 | for_each_pci_bridge(dev, bus) { | |
2487 | if (dev->is_hotplug_bridge) | |
2488 | hotplug_bridges++; | |
2489 | else | |
2490 | normal_bridges++; | |
2491 | } | |
2492 | ||
4147c2fd MW |
2493 | /* |
2494 | * Scan bridges that are already configured. We don't touch them | |
2495 | * unless they are misconfigured (which will be done in the second | |
2496 | * scan below). | |
2497 | */ | |
1c02ea81 MW |
2498 | for_each_pci_bridge(dev, bus) { |
2499 | cmax = max; | |
2500 | max = pci_scan_bridge_extend(bus, dev, max, 0, 0); | |
2501 | used_buses += cmax - max; | |
2502 | } | |
4147c2fd MW |
2503 | |
2504 | /* Scan bridges that need to be reconfigured */ | |
1c02ea81 MW |
2505 | for_each_pci_bridge(dev, bus) { |
2506 | unsigned int buses = 0; | |
2507 | ||
2508 | if (!hotplug_bridges && normal_bridges == 1) { | |
2509 | /* | |
2510 | * There is only one bridge on the bus (upstream | |
2511 | * port) so it gets all available buses which it | |
2512 | * can then distribute to the possible hotplug | |
2513 | * bridges below. | |
2514 | */ | |
2515 | buses = available_buses; | |
2516 | } else if (dev->is_hotplug_bridge) { | |
2517 | /* | |
2518 | * Distribute the extra buses between hotplug | |
2519 | * bridges if any. | |
2520 | */ | |
2521 | buses = available_buses / hotplug_bridges; | |
2522 | buses = min(buses, available_buses - used_buses); | |
2523 | } | |
2524 | ||
2525 | cmax = max; | |
2526 | max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1); | |
2527 | used_buses += max - cmax; | |
2528 | } | |
1da177e4 | 2529 | |
e16b4660 KB |
2530 | /* |
2531 | * Make sure a hotplug bridge has at least the minimum requested | |
1c02ea81 MW |
2532 | * number of buses but allow it to grow up to the maximum available |
2533 | * bus number if there is room. | 
e16b4660 | 2534 | */ |
1c02ea81 MW |
2535 | if (bus->self && bus->self->is_hotplug_bridge) { |
2536 | used_buses = max_t(unsigned int, available_buses, | |
2537 | pci_hotplug_bus_size - 1); | |
2538 | if (max - start < used_buses) { | |
2539 | max = start + used_buses; | |
2540 | ||
2541 | /* Do not allocate more buses than we have room left */ | |
2542 | if (max > bus->busn_res.end) | |
2543 | max = bus->busn_res.end; | |
2544 | ||
2545 | dev_dbg(&bus->dev, "%pR extended by %#02x\n", | |
2546 | &bus->busn_res, max - start); | |
2547 | } | |
e16b4660 KB |
2548 | } |
2549 | ||
1da177e4 LT |
2550 | /* |
2551 | * We've scanned the bus and so we know all about what's on | |
2552 | * the other side of any bridges that may be on this bus plus | |
2553 | * any devices. | |
2554 | * | |
2555 | * Return how far we've got finding sub-buses. | |
2556 | */ | |
0207c356 | 2557 | dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max); |
1da177e4 LT |
2558 | return max; |
2559 | } | |
1c02ea81 MW |
2560 | |
2561 | /** | |
2562 | * pci_scan_child_bus() - Scan devices below a bus | |
2563 | * @bus: Bus to scan for devices | |
2564 | * | |
2565 | * Scans devices below @bus including subordinate buses. Returns the new | 
2566 | * subordinate bus number covering all the found devices. | 
2567 | */ | |
2568 | unsigned int pci_scan_child_bus(struct pci_bus *bus) | |
2569 | { | |
2570 | return pci_scan_child_bus_extend(bus, 0); | |
2571 | } | |
b7fe9434 | 2572 | EXPORT_SYMBOL_GPL(pci_scan_child_bus); |
1da177e4 | 2573 | |
6c0cc950 RW |
2574 | /** |
2575 | * pcibios_root_bridge_prepare - Platform-specific host bridge setup. | |
2576 | * @bridge: Host bridge to set up. | |
2577 | * | |
2578 | * Default empty implementation. Replace with an architecture-specific setup | |
2579 | * routine, if necessary. | |
2580 | */ | |
2581 | int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) | |
2582 | { | |
2583 | return 0; | |
2584 | } | |
2585 | ||
10a95747 JL |
2586 | void __weak pcibios_add_bus(struct pci_bus *bus) |
2587 | { | |
2588 | } | |
2589 | ||
2590 | void __weak pcibios_remove_bus(struct pci_bus *bus) | |
2591 | { | |
2592 | } | |
2593 | ||
9ee8a1c4 LP |
2594 | struct pci_bus *pci_create_root_bus(struct device *parent, int bus, |
2595 | struct pci_ops *ops, void *sysdata, struct list_head *resources) | |
1da177e4 | 2596 | { |
0efd5aab | 2597 | int error; |
5a21d70d | 2598 | struct pci_host_bridge *bridge; |
1da177e4 | 2599 | |
59094065 | 2600 | bridge = pci_alloc_host_bridge(0); |
7b543663 | 2601 | if (!bridge) |
37d6a0a6 | 2602 | return NULL; |
7b543663 YL |
2603 | |
2604 | bridge->dev.parent = parent; | |
a9d9f527 | 2605 | |
37d6a0a6 AB |
2606 | list_splice_init(resources, &bridge->windows); |
2607 | bridge->sysdata = sysdata; | |
2608 | bridge->busnr = bus; | |
2609 | bridge->ops = ops; | |
a9d9f527 | 2610 | |
37d6a0a6 AB |
2611 | error = pci_register_host_bridge(bridge); |
2612 | if (error < 0) | |
2613 | goto err_out; | |
a5390aa6 | 2614 | |
37d6a0a6 | 2615 | return bridge->bus; |
1da177e4 | 2616 | |
1da177e4 | 2617 | err_out: |
37d6a0a6 | 2618 | kfree(bridge); |
1da177e4 LT |
2619 | return NULL; |
2620 | } | |
e6b29dea | 2621 | EXPORT_SYMBOL_GPL(pci_create_root_bus); |
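/*
 * Illustrative sketch, not part of this file: a minimal legacy-style caller
 * builds a resource list and hands it to pci_create_root_bus(), much like
 * pci_scan_bus() further down; "my_ops" and "my_sysdata" are hypothetical.
 */
static struct pci_bus *example_create_root(struct device *parent,
					   struct pci_ops *my_ops,
					   void *my_sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);

	b = pci_create_root_bus(parent, 0, my_ops, my_sysdata, &resources);
	if (!b)
		pci_free_resource_list(&resources);

	return b;
}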
cdb9b9f7 | 2622 | |
98a35831 YL |
2623 | int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) |
2624 | { | |
2625 | struct resource *res = &b->busn_res; | |
2626 | struct resource *parent_res, *conflict; | |
2627 | ||
2628 | res->start = bus; | |
2629 | res->end = bus_max; | |
2630 | res->flags = IORESOURCE_BUS; | |
2631 | ||
2632 | if (!pci_is_root_bus(b)) | |
2633 | parent_res = &b->parent->busn_res; | |
2634 | else { | |
2635 | parent_res = get_pci_domain_busn_res(pci_domain_nr(b)); | |
2636 | res->flags |= IORESOURCE_PCI_FIXED; | |
2637 | } | |
2638 | ||
ced04d15 | 2639 | conflict = request_resource_conflict(parent_res, res); |
98a35831 YL |
2640 | |
2641 | if (conflict) | |
2642 | dev_printk(KERN_DEBUG, &b->dev, | |
2643 | "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n", | |
2644 | res, pci_is_root_bus(b) ? "domain " : "", | |
2645 | parent_res, conflict->name, conflict); | |
98a35831 YL |
2646 | |
2647 | return conflict == NULL; | |
2648 | } | |
2649 | ||
2650 | int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max) | |
2651 | { | |
2652 | struct resource *res = &b->busn_res; | |
2653 | struct resource old_res = *res; | |
2654 | resource_size_t size; | |
2655 | int ret; | |
2656 | ||
2657 | if (res->start > bus_max) | |
2658 | return -EINVAL; | |
2659 | ||
2660 | size = bus_max - res->start + 1; | |
2661 | ret = adjust_resource(res, res->start, size); | |
2662 | dev_printk(KERN_DEBUG, &b->dev, | |
2663 | "busn_res: %pR end %s updated to %02x\n", | |
2664 | &old_res, ret ? "can not be" : "is", bus_max); | |
2665 | ||
2666 | if (!ret && !res->parent) | |
2667 | pci_bus_insert_busn_res(b, res->start, res->end); | |
2668 | ||
2669 | return ret; | |
2670 | } | |
2671 | ||
2672 | void pci_bus_release_busn_res(struct pci_bus *b) | |
2673 | { | |
2674 | struct resource *res = &b->busn_res; | |
2675 | int ret; | |
2676 | ||
2677 | if (!res->flags || !res->parent) | |
2678 | return; | |
2679 | ||
2680 | ret = release_resource(res); | |
2681 | dev_printk(KERN_DEBUG, &b->dev, | |
2682 | "busn_res: %pR %s released\n", | |
2683 | res, ret ? "can not be" : "is"); | |
2684 | } | |
2685 | ||
1228c4b6 | 2686 | int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge) |
a2ebb827 | 2687 | { |
14d76b68 | 2688 | struct resource_entry *window; |
4d99f524 | 2689 | bool found = false; |
a2ebb827 | 2690 | struct pci_bus *b; |
1228c4b6 | 2691 | int max, bus, ret; |
4d99f524 | 2692 | |
1228c4b6 LP |
2693 | if (!bridge) |
2694 | return -EINVAL; | |
2695 | ||
2696 | resource_list_for_each_entry(window, &bridge->windows) | |
4d99f524 YL |
2697 | if (window->res->flags & IORESOURCE_BUS) { |
2698 | found = true; | |
2699 | break; | |
2700 | } | |
a2ebb827 | 2701 | |
1228c4b6 LP |
2702 | ret = pci_register_host_bridge(bridge); |
2703 | if (ret < 0) | |
2704 | return ret; | |
2705 | ||
2706 | b = bridge->bus; | |
2707 | bus = bridge->busnr; | |
a2ebb827 | 2708 | |
4d99f524 YL |
2709 | if (!found) { |
2710 | dev_info(&b->dev, | |
2711 | "No busn resource found for root bus, will use [bus %02x-ff]\n", | |
2712 | bus); | |
2713 | pci_bus_insert_busn_res(b, bus, 255); | |
2714 | } | |
2715 | ||
2716 | max = pci_scan_child_bus(b); | |
2717 | ||
2718 | if (!found) | |
2719 | pci_bus_update_busn_res_end(b, max); | |
2720 | ||
1228c4b6 | 2721 | return 0; |
a2ebb827 | 2722 | } |
1228c4b6 | 2723 | EXPORT_SYMBOL(pci_scan_root_bus_bridge); |
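/*
 * Illustrative sketch, not part of this file: a host controller driver
 * allocates a bridge, fills in its windows and ops, scans it with the
 * function exported above and then adds the devices.  "my_ops",
 * "my_sysdata" and "res" are hypothetical; error handling is simplified
 * (kfree() mirrors the err_out path of pci_create_root_bus() above).
 */
static int example_host_probe(struct device *dev, struct pci_ops *my_ops,
			      void *my_sysdata, struct list_head *res)
{
	struct pci_host_bridge *bridge;
	int ret;

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	bridge->dev.parent = dev;
	list_splice_init(res, &bridge->windows);
	bridge->sysdata = my_sysdata;
	bridge->busnr = 0;
	bridge->ops = my_ops;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret) {
		kfree(bridge);
		return ret;
	}

	pci_bus_add_devices(bridge->bus);
	return 0;
}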
d2a7926d LP |
2724 | |
2725 | struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, | |
2726 | struct pci_ops *ops, void *sysdata, struct list_head *resources) | |
2727 | { | |
14d76b68 | 2728 | struct resource_entry *window; |
4d99f524 | 2729 | bool found = false; |
a2ebb827 | 2730 | struct pci_bus *b; |
4d99f524 YL |
2731 | int max; |
2732 | ||
14d76b68 | 2733 | resource_list_for_each_entry(window, resources) |
4d99f524 YL |
2734 | if (window->res->flags & IORESOURCE_BUS) { |
2735 | found = true; | |
2736 | break; | |
2737 | } | |
a2ebb827 | 2738 | |
9ee8a1c4 | 2739 | b = pci_create_root_bus(parent, bus, ops, sysdata, resources); |
a2ebb827 BH |
2740 | if (!b) |
2741 | return NULL; | |
2742 | ||
4d99f524 YL |
2743 | if (!found) { |
2744 | dev_info(&b->dev, | |
2745 | "No busn resource found for root bus, will use [bus %02x-ff]\n", | |
2746 | bus); | |
2747 | pci_bus_insert_busn_res(b, bus, 255); | |
2748 | } | |
2749 | ||
2750 | max = pci_scan_child_bus(b); | |
2751 | ||
2752 | if (!found) | |
2753 | pci_bus_update_busn_res_end(b, max); | |
2754 | ||
a2ebb827 | 2755 | return b; |
d2a7926d | 2756 | } |
a2ebb827 BH |
2757 | EXPORT_SYMBOL(pci_scan_root_bus); |
2758 | ||
15856ad5 | 2759 | struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, |
de4b2f76 BH |
2760 | void *sysdata) |
2761 | { | |
2762 | LIST_HEAD(resources); | |
2763 | struct pci_bus *b; | |
2764 | ||
2765 | pci_add_resource(&resources, &ioport_resource); | |
2766 | pci_add_resource(&resources, &iomem_resource); | |
857c3b66 | 2767 | pci_add_resource(&resources, &busn_resource); |
de4b2f76 BH |
2768 | b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources); |
2769 | if (b) { | |
857c3b66 | 2770 | pci_scan_child_bus(b); |
de4b2f76 BH |
2771 | } else { |
2772 | pci_free_resource_list(&resources); | |
2773 | } | |
2774 | return b; | |
2775 | } | |
2776 | EXPORT_SYMBOL(pci_scan_bus); | |
2777 | ||
2f320521 YL |
2778 | /** |
2779 | * pci_rescan_bus_bridge_resize - scan a PCI bus for devices. | |
2780 | * @bridge: PCI bridge for the bus to scan | |
2781 | * | |
2782 | * Scan a PCI bus and child buses for new devices, add them, | |
2783 | * and enable them, resizing bridge mmio/io resource if necessary | |
2784 | * and possible. The caller must ensure the child devices are already | |
2785 | * removed for resizing to occur. | |
2786 | * | |
2787 | * Returns the maximum subordinate bus number discovered. | 
2788 | */ | |
10874f5a | 2789 | unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge) |
2f320521 YL |
2790 | { |
2791 | unsigned int max; | |
2792 | struct pci_bus *bus = bridge->subordinate; | |
2793 | ||
2794 | max = pci_scan_child_bus(bus); | |
2795 | ||
2796 | pci_assign_unassigned_bridge_resources(bridge); | |
2797 | ||
2798 | pci_bus_add_devices(bus); | |
2799 | ||
2800 | return max; | |
2801 | } | |
2802 | ||
a5213a31 YL |
2803 | /** |
2804 | * pci_rescan_bus - scan a PCI bus for devices. | |
2805 | * @bus: PCI bus to scan | |
2806 | * | |
2807 | * Scan a PCI bus and child buses for new devices, add them, | 
2808 | * and enable them. | 
2809 | * | |
2810 | * Returns the maximum subordinate bus number discovered. | 
2811 | */ | |
10874f5a | 2812 | unsigned int pci_rescan_bus(struct pci_bus *bus) |
a5213a31 YL |
2813 | { |
2814 | unsigned int max; | |
2815 | ||
2816 | max = pci_scan_child_bus(bus); | |
2817 | pci_assign_unassigned_bus_resources(bus); | |
2818 | pci_bus_add_devices(bus); | |
2819 | ||
2820 | return max; | |
2821 | } | |
2822 | EXPORT_SYMBOL_GPL(pci_rescan_bus); | |
2823 | ||
9d16947b RW |
2824 | /* |
2825 | * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal | |
2826 | * routines should always be executed under this mutex. | |
2827 | */ | |
2828 | static DEFINE_MUTEX(pci_rescan_remove_lock); | |
2829 | ||
2830 | void pci_lock_rescan_remove(void) | |
2831 | { | |
2832 | mutex_lock(&pci_rescan_remove_lock); | |
2833 | } | |
2834 | EXPORT_SYMBOL_GPL(pci_lock_rescan_remove); | |
2835 | ||
2836 | void pci_unlock_rescan_remove(void) | |
2837 | { | |
2838 | mutex_unlock(&pci_rescan_remove_lock); | |
2839 | } | |
2840 | EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove); | |
2841 | ||
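/*
 * Illustrative sketch, not part of this file: any code that triggers a
 * rescan from outside the core (a hypothetical sysfs handler, say) is
 * expected to hold the lock above around pci_rescan_bus();
 * example_locked_rescan() is an assumed name.
 */
static void example_locked_rescan(struct pci_bus *bus)
{
	pci_lock_rescan_remove();
	pci_rescan_bus(bus);
	pci_unlock_rescan_remove();
}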
3c78bc61 RD |
2842 | static int __init pci_sort_bf_cmp(const struct device *d_a, |
2843 | const struct device *d_b) | |
6b4b78fe | 2844 | { |
99178b03 GKH |
2845 | const struct pci_dev *a = to_pci_dev(d_a); |
2846 | const struct pci_dev *b = to_pci_dev(d_b); | |
2847 | ||
6b4b78fe MD |
2848 | if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1; |
2849 | else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1; | |
2850 | ||
2851 | if (a->bus->number < b->bus->number) return -1; | |
2852 | else if (a->bus->number > b->bus->number) return 1; | |
2853 | ||
2854 | if (a->devfn < b->devfn) return -1; | |
2855 | else if (a->devfn > b->devfn) return 1; | |
2856 | ||
2857 | return 0; | |
2858 | } | |
2859 | ||
5ff580c1 | 2860 | void __init pci_sort_breadthfirst(void) |
6b4b78fe | 2861 | { |
99178b03 | 2862 | bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp); |
6b4b78fe | 2863 | } |
95e3ba97 MW |
2864 | |
2865 | int pci_hp_add_bridge(struct pci_dev *dev) | |
2866 | { | |
2867 | struct pci_bus *parent = dev->bus; | |
4147c2fd | 2868 | int busnr, start = parent->busn_res.start; |
1c02ea81 | 2869 | unsigned int available_buses = 0; |
95e3ba97 MW |
2870 | int end = parent->busn_res.end; |
2871 | ||
2872 | for (busnr = start; busnr <= end; busnr++) { | |
2873 | if (!pci_find_bus(pci_domain_nr(parent), busnr)) | |
2874 | break; | |
2875 | } | |
2876 | if (busnr-- > end) { | |
2877 | dev_err(&dev->dev, "No bus number available for hot-added bridge\n"); | |
2878 | return -1; | |
2879 | } | |
4147c2fd MW |
2880 | |
2881 | /* Scan bridges that are already configured */ | |
2882 | busnr = pci_scan_bridge(parent, dev, busnr, 0); | |
2883 | ||
1c02ea81 MW |
2884 | /* |
2885 | * Distribute the available bus numbers between hotplug-capable | |
2886 | * bridges to make extending the chain later possible. | |
2887 | */ | |
2888 | available_buses = end - busnr; | |
2889 | ||
4147c2fd | 2890 | /* Scan bridges that need to be reconfigured */ |
1c02ea81 | 2891 | pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1); |
4147c2fd | 2892 | |
95e3ba97 MW |
2893 | if (!dev->subordinate) |
2894 | return -1; | |
2895 | ||
2896 | return 0; | |
2897 | } | |
2898 | EXPORT_SYMBOL_GPL(pci_hp_add_bridge); |
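/*
 * Illustrative sketch, not part of this file: a hotplug controller driver
 * calls pci_hp_add_bridge() for each bridge it finds while enabling a slot,
 * before assigning resources and adding the devices;
 * example_enable_slot() is an assumed name.
 */
static int example_enable_slot(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (!pci_scan_slot(bus, PCI_DEVFN(0, 0)))
		return -ENODEV;

	for_each_pci_bridge(dev, bus)
		pci_hp_add_bridge(dev);

	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return 0;
}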