/*
 *	$Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
 *
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */
#include "pci.h"


/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char __devinit
pci_bus_max_busnr(struct pci_bus* bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif	/* 0 */

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap)
{
	u8 id;
	int ttl = 48;

	while (ttl--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap)
{
	u16 status;
	u8 pos;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		pos = PCI_CAPABILITY_LIST;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		pos = PCI_CB_CAPABILITY_LIST;
		break;
	default:
		return 0;
	}
	return __pci_find_next_cap(bus, devfn, pos, cap);
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	return __pci_bus_find_cap(dev->bus, dev->devfn, dev->hdr_type, cap);
}

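/*
 * Illustrative sketch (not part of the original file): a driver might use
 * pci_find_capability() to locate the Power Management capability and read
 * its control/status register.  my_dev_read_pmcsr() is a hypothetical helper.
 */
#if 0
static int my_dev_read_pmcsr(struct pci_dev *pdev)
{
	int pm;
	u16 pmcsr;

	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (!pm)
		return -ENODEV;		/* device has no PM capability */

	pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
	return pmcsr & PCI_PM_CTRL_STATE_MASK;	/* current D-state */
}
#endif	/* illustrative example */
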
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap);
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
	int pos = 0x100;

	if (dev->cfg_size <= 256)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 0x100)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

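/*
 * Illustrative sketch (not part of the original file): locating the
 * Advanced Error Reporting extended capability of a PCI Express device.
 * my_dev_find_aer() is a hypothetical helper.
 */
#if 0
static int my_dev_find_aer(struct pci_dev *pdev)
{
	int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);

	if (!aer)
		return -ENODEV;	/* no AER capability or no extended config space */
	return aer;		/* config-space offset of the AER capability header */
}
#endif	/* illustrative example */
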
/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
void
pci_restore_bars(struct pci_dev *dev)
{
	int i, numres;

	switch (dev->hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
		numres = 6;
		break;
	case PCI_HEADER_TYPE_BRIDGE:
		numres = 2;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		numres = 1;
		break;
	default:
		/* Should never get here, but just in case... */
		return;
	}

	for (i = 0; i < numres; i++)
		pci_update_resource(dev, &dev->resource[i], i);
}

int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to enter a sleep state shallower than the current
 * one (other than D0).
 * 0 if we're already in the requested state.
 * -EIO if device does not support PCI PM.
 * 0 if we can successfully change the power state.
 */
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int pm, need_restore = 0;
	u16 pmcsr, pmc;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;

	/* Validate current state:
	 * Can enter D0 from any state, but we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state > state) {
		printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
			__FUNCTION__, pci_name(dev), state, dev->current_state);
		return -EINVAL;
	} else if (dev->current_state == state)
		return 0;	/* we're already there */

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm)
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		printk(KERN_DEBUG
		       "PCI: %s has unsupported PM cap regs version (%u)\n",
		       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
		return -EIO;
	}

	/* check if this device supports the desired state */
	if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
		return -EIO;
	else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = 1;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(10);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(200);

	/*
	 * Give firmware a chance to be called, such as ACPI _PRx, _PSx.
	 * Firmware method after native method?
	 */
	if (platform_pci_set_power_state)
		platform_pci_set_power_state(dev, state);

	dev->current_state = state;

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	return 0;
}

int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	int ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	if (platform_pci_choose_state) {
		ret = platform_pci_choose_state(dev, state);
		if (ret >= 0)
			state.event = ret;
	}

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_SUSPEND:
		return PCI_D3hot;
	default:
		printk("They asked me for state %d\n", state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

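/*
 * Illustrative sketch (not part of the original file): a driver's suspend()
 * method typically saves config space and then enters the state suggested
 * by pci_choose_state().  my_dev_suspend() is a hypothetical callback.
 */
#if 0
static int my_dev_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
#endif	/* illustrative example */
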
/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	if ((i = pci_save_msi_state(dev)) != 0)
		return i;
	if ((i = pci_save_msix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	/*
	 * The Base Address registers should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			printk(KERN_DEBUG "PM: Writing back config space on "
				"device %s at offset %x (was %x, writing %x)\n",
				pci_name(dev), i,
				val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_msi_state(dev);
	pci_restore_msix_state(dev);
	return 0;
}

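/*
 * Illustrative sketch (not part of the original file): the matching resume()
 * method puts the device back into D0 and restores the configuration saved
 * by pci_save_state().  my_dev_resume() is a hypothetical callback.
 */
#if 0
static int my_dev_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif	/* illustrative example */
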
/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BARs that must be configured
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable selected I/O and memory resources. Wake up the device if it
 * was suspended. Beware, this function can fail.
 */

int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	return 0;
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int
pci_enable_device(struct pci_dev *dev)
{
	int err;

	if (dev->is_enabled)
		return 0;

	err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
	if (err)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);
	dev->is_enabled = 1;
	return 0;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 */
void
pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	if (dev->msi_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
			PCI_CAP_ID_MSI);
	if (dev->msix_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSIX),
			PCI_CAP_ID_MSIX);

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}
	dev->is_busmaster = 0;

	pcibios_disable_device(dev);
	dev->is_enabled = 0;
}

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: - PCI device to operate on
 * @state: - Current state of device.
 * @enable: - Flag to enable or disable generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if device doesn't have PM Capabilities.
 * -EINVAL is returned if device supports it, but can't generate wake events.
 * 0 if operation is successful.
 *
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int pm;
	u16 value;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If device doesn't support PM Capabilities, but request is to disable
	 * wake events, it's a nop; otherwise fail */
	if (!pm)
		return enable ? -EIO : 0;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);

	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;	/* First bit of mask */

	/* Check if it can generate PME# from requested state. */
	if (!value || !(value & (1 << state)))
		return enable ? -EINVAL : 0;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}

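/*
 * Illustrative sketch (not part of the original file): arming PME#
 * generation from D3hot before suspending, and disarming it on resume.
 * Error handling is omitted; the helper name is hypothetical.
 */
#if 0
static void my_dev_arm_wakeup(struct pci_dev *pdev, int suspending)
{
	if (suspending)
		pci_enable_wake(pdev, PCI_D3hot, 1);	/* enable PME# from D3hot */
	else
		pci_enable_wake(pdev, PCI_D0, 0);	/* disable PME# again */
}
#endif	/* illustrative example */
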
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;
	pin--;
	while (dev->bus->self) {
		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}

/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}

	return 0;

err_out:
	printk(KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		bar + 1, /* PCI BAR # */
		pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
		pci_name(pdev));
	return -EBUSY;
}


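/*
 * Illustrative sketch (not part of the original file): reserving a single
 * memory BAR with pci_request_region() and mapping it.  ioremap() comes
 * from <asm/io.h>; my_dev_map_bar0() is a hypothetical helper.
 */
#if 0
static void __iomem *my_dev_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	if (pci_request_region(pdev, 0, "my_dev"))
		return NULL;	/* BAR 0 already claimed by another owner */

	regs = ioremap(pci_resource_start(pdev, 0),
		       pci_resource_len(pdev, 0));
	if (!regs)
		pci_release_region(pdev, 0);
	return regs;
}
#endif	/* illustrative example */
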
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < 6; i++)
		pci_release_region(pdev, i);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int i;

	for (i = 0; i < 6; i++)
		if (pci_request_region(pdev, i, res_name))
			goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		pci_release_region(pdev, i);

	return -EBUSY;
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = 1;
	pcibios_set_master(dev);
}

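/*
 * Illustrative sketch (not part of the original file): the canonical enable
 * sequence in a driver's probe() routine -- enable the device, claim its
 * regions, then turn on bus mastering for DMA.  my_dev_probe() and the
 * "my_dev" resource name are hypothetical.
 */
#if 0
static int my_dev_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "my_dev");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	pci_set_master(pdev);
	return 0;
}
#endif	/* illustrative example */
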
#ifndef HAVE_ARCH_PCI_MWI
/* This can be overridden by arch code. */
u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;

/**
 * pci_generic_prep_mwi - helper function for pci_set_mwi
 * @dev: the PCI device for which MWI is enabled
 *
 * Helper function for generic implementation of pcibios_prep_mwi
 * function.  Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <[email protected]>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_generic_prep_mwi(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
	       "by device %s\n", pci_cache_line_size << 2, pci_name(dev));

	return -EINVAL;
}
#endif /* !HAVE_ARCH_PCI_MWI */

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
 * and then calls @pcibios_set_mwi to do the needed arch specific
 * operations or a generic mwi-prep function.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

#ifdef HAVE_ARCH_PCI_MWI
	rc = pcibios_prep_mwi(dev);
#else
	rc = pci_generic_prep_mwi(dev);
#endif

	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

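/*
 * Illustrative sketch (not part of the original file): MWI is an
 * optimization, so most drivers simply remember whether pci_set_mwi()
 * succeeded rather than failing the probe.  The flag name is hypothetical.
 */
#if 0
static int my_dev_using_mwi;

static void my_dev_try_mwi(struct pci_dev *pdev)
{
	my_dev_using_mwi = (pci_set_mwi(pdev) == 0);
}
#endif	/* illustrative example */
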
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable) {
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	} else {
		new = pci_command | PCI_COMMAND_INTX_DISABLE;
	}

	if (new != pci_command) {
		pci_write_config_word(pdev, PCI_COMMAND, new);
	}
}

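/*
 * Illustrative sketch (not part of the original file): drivers that switch
 * to MSI often mask the legacy INTx line explicitly.  Hypothetical helper.
 */
#if 0
static void my_dev_mask_intx(struct pci_dev *pdev)
{
	pci_intx(pdev, 0);	/* 0 == set the INTx Disable bit */
}
#endif	/* illustrative example */
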
#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif

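/*
 * Illustrative sketch (not part of the original file): a device that can
 * address 64 bits falls back to a 32-bit mask if the platform rejects the
 * wider one.  The masks are written as literals rather than via the
 * DMA_*BIT_MASK helpers, just to keep the example self-contained.
 */
#if 0
static int my_dev_set_dma(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL))
		return pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);

	/* 64-bit DMA not supported; try 32-bit */
	if (!pci_set_dma_mask(pdev, 0xffffffffULL))
		return pci_set_consistent_dma_mask(pdev, 0xffffffffULL);

	return -EIO;
}
#endif	/* illustrative example */
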
static int __devinit pci_init(void)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		pci_fixup_device(pci_fixup_final, dev);
	}
	return 0;
}

static int __devinit pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 1;
}

device_initcall(pci_init);

__setup("pci=", pci_setup);

#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);
#endif

EXPORT_SYMBOL_GPL(pci_restore_bars);
EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake);

/* Quirk info */

EXPORT_SYMBOL(isa_dma_bridge_buggy);
EXPORT_SYMBOL(pci_pci_problems);