/*
 *	$Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
 *
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */
#include "pci.h"

unsigned int pci_pm_d3_delay = 10;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char __devinit
pci_bus_max_busnr(struct pci_bus* bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif  /* 0 */

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM          Power Management
 *  %PCI_CAP_ID_AGP         Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD         Vital Product Data
 *  %PCI_CAP_ID_SLOTID      Slot Identification
 *  %PCI_CAP_ID_MSI         Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP       CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX        PCI-X
 *  %PCI_CAP_ID_EXP         PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}

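/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * might use pci_find_capability() to locate a device's Power Management
 * capability and read its PMC register, mirroring what pci_set_power_state()
 * below does.  The function name is hypothetical.
 */
#if 0
static u16 example_read_pmc(struct pci_dev *dev)
{
	int pm;
	u16 pmc = 0;

	/* Offset of the PM capability in config space, or 0 if absent. */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (pm)
		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
	return pmc;
}
#endif
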
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
	int pos = 0x100;

	if (dev->cfg_size <= 256)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 0x100)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

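/*
 * Illustrative usage sketch (not part of the original file): locating the
 * Advanced Error Reporting extended capability with
 * pci_find_ext_capability().  A return value of 0 means the device has no
 * AER registers.  The function name is hypothetical.
 */
#if 0
static int example_has_aer(struct pci_dev *dev)
{
	return pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR) != 0;
}
#endif
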
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
void
pci_restore_bars(struct pci_dev *dev)
{
	int i, numres;

	switch (dev->hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
		numres = 6;
		break;
	case PCI_HEADER_TYPE_BRIDGE:
		numres = 2;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		numres = 1;
		break;
	default:
		/* Should never get here, but just in case... */
		return;
	}

	for (i = 0; i < numres; i++)
		pci_update_resource(dev, &dev->resource[i], i);
}

int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to enter a lower state than we're already in.
 * 0 if we're already in the requested state.
 * -EIO if device does not support PCI PM.
 * 0 if we can successfully change the power state.
 */
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int pm, need_restore = 0;
	u16 pmcsr, pmc;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;

	/* Validate current state:
	 * We can enter D0 from any state, but we can only go deeper
	 * to sleep if we're already in a low-power state.
	 */
	if (state != PCI_D0 && dev->current_state > state) {
		printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
			__FUNCTION__, pci_name(dev), state, dev->current_state);
		return -EINVAL;
	} else if (dev->current_state == state)
		return 0;        /* we're already there */

	/*
	 * If the device or the parent bridge can't support PCI PM, ignore
	 * the request if we're doing anything besides putting it into D0
	 * (which would only happen on boot).
	 */
	if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		return 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm)
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		printk(KERN_DEBUG
		       "PCI: %s has unsupported PM cap regs version (%u)\n",
		       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
		return -EIO;
	}

	/* check if this device supports the desired state */
	if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
		return -EIO;
	else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = 1;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(pci_pm_d3_delay);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(200);

	/*
	 * Give firmware a chance to be called, such as ACPI _PRx, _PSx.
	 * Should the firmware method be called after the native method?
	 */
	if (platform_pci_set_power_state)
		platform_pci_set_power_state(dev, state);

	dev->current_state = state;

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	return 0;
}

int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	int ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	if (platform_pci_choose_state) {
		ret = platform_pci_choose_state(dev, state);
		if (ret >= 0)
			state.event = ret;
	}

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
		return PCI_D3hot;
	default:
		printk("Unrecognized suspend event %d\n", state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

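/*
 * Illustrative usage sketch (not part of the original file): a conventional
 * driver suspend method built on pci_save_state(), pci_disable_device(),
 * pci_choose_state() and pci_set_power_state().  The function name is
 * hypothetical and error handling is kept minimal.
 */
#if 0
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
#endif
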
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + sizeof(u16) * 4, GFP_KERNEL);
	if (!save_state) {
		dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n");
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	pci_add_saved_cap(dev, save_state);
	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL);
	if (!save_state) {
		dev_err(&dev->dev, "Out of memory in pci_save_pcix_state\n");
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]);
	pci_add_saved_cap(dev, save_state);
	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	if ((i = pci_save_msi_state(dev)) != 0)
		return i;
	if ((i = pci_save_msix_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);

	/*
	 * The Base Address register should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			printk(KERN_DEBUG "PM: Writing back config space on "
				"device %s at offset %x (was %x, writing %x)\n",
				pci_name(dev), i,
				val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_msix_state(dev);
	return 0;
}

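/*
 * Illustrative usage sketch (not part of the original file): the matching
 * resume path, returning the device to D0 and writing back the saved
 * configuration space before re-enabling it.  The function name is
 * hypothetical.
 */
#if 0
static int example_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif
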
/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BAR's that must be configured
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable selected I/O and memory resources. Wake up the device if it
 * was suspended. Beware, this function can fail.
 */

int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	return 0;
}

/**
 * __pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note this function is a backend and is not supposed to be called by
 * normal code, use pci_enable_device() instead.
 */
int
__pci_enable_device(struct pci_dev *dev)
{
	int err;

	err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
	if (err)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);
	return 0;
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	int result;
	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */
	result = __pci_enable_device(dev);
	if (result < 0)
		atomic_dec(&dev->enable_cnt);
	return result;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	if (dev->msi_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
			PCI_CAP_ID_MSI);
	if (dev->msix_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
			PCI_CAP_ID_MSIX);

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}
	dev->is_busmaster = 0;

	pcibios_disable_device(dev);
}

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: - PCI device to operate on
 * @state: - Current state of device.
 * @enable: - Flag to enable or disable generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if device doesn't have PM Capabilities.
 * -EINVAL is returned if device supports it, but can't generate wake events.
 * 0 if operation is successful.
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int pm;
	u16 value;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If device doesn't support PM Capabilities, but request is to disable
	 * wake events, it's a nop; otherwise fail */
	if (!pm)
		return enable ? -EIO : 0;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);

	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;   /* First bit of mask */

	/* Check if it can generate PME# from requested state. */
	if (!value || !(value & (1 << state)))
		return enable ? -EINVAL : 0;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}

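/*
 * Illustrative usage sketch (not part of the original file): arming PME#
 * generation from D3hot on suspend and disarming it on resume with
 * pci_enable_wake().  Devices that cannot generate wake events simply
 * return -EINVAL here.  The function name is hypothetical.
 */
#if 0
static void example_setup_wake(struct pci_dev *pdev, int suspending)
{
	if (suspending)
		pci_enable_wake(pdev, PCI_D3hot, 1);
	else
		pci_enable_wake(pdev, PCI_D0, 0);
}
#endif
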
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;
	pin--;
	while (dev->bus->self) {
		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}

/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}

	return 0;

err_out:
	printk(KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx "
		"for device %s\n",
		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		bar + 1, /* PCI BAR # */
		(unsigned long long)pci_resource_len(pdev, bar),
		(unsigned long long)pci_resource_start(pdev, bar),
		pci_name(pdev));
	return -EBUSY;
}


/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < 6; i++)
		pci_release_region(pdev, i);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int i;

	for (i = 0; i < 6; i++)
		if (pci_request_region(pdev, i, res_name))
			goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		pci_release_region(pdev, i);

	return -EBUSY;
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = 1;
	pcibios_set_master(dev);
}

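/*
 * Illustrative usage sketch (not part of the original file): the usual
 * probe-time sequence of pci_enable_device(), pci_request_regions() and
 * pci_set_master().  The "example" owner string and the function name are
 * hypothetical.
 */
#if 0
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "example");
	if (rc) {
		pci_disable_device(pdev);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
#endif
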
#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

#ifndef PCI_CACHE_LINE_BYTES
#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
#endif

/* This can be overridden by arch code. */
/* Don't forget this is measured in 32-bit words, not bytes */
u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <[email protected]>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
	       "by device %s\n", pci_cache_line_size << 2, pci_name(dev));

	return -EINVAL;
}

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
 * and then calls @pcibios_set_mwi to do the needed arch specific
 * operations or a generic mwi-prep function.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
#endif /* ! PCI_DISABLE_MWI */

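/*
 * Illustrative usage sketch (not part of the original file): drivers that
 * benefit from Memory-Write-Invalidate typically treat a pci_set_mwi()
 * failure as non-fatal and simply continue without it.  The function name
 * is hypothetical.
 */
#if 0
static void example_try_mwi(struct pci_dev *pdev)
{
	if (pci_set_mwi(pdev))
		printk(KERN_INFO "PCI: MWI unavailable on %s, continuing\n",
		       pci_name(pdev));
}
#endif
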
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable) {
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	} else {
		new = pci_command | PCI_COMMAND_INTX_DISABLE;
	}

	if (new != pci_command) {
		pci_write_config_word(pdev, PCI_COMMAND, new);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif

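/*
 * Illustrative usage sketch (not part of the original file): trying a
 * 64-bit DMA mask first and falling back to 32 bits.  DMA_64BIT_MASK and
 * DMA_32BIT_MASK come from <linux/dma-mapping.h> in kernels of this era;
 * the function name is hypothetical.
 */
#if 0
static int example_set_dma_masks(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		return pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);

	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);

	return -EIO;
}
#endif
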
static int __devinit pci_init(void)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		pci_fixup_device(pci_fixup_final, dev);
	}
	return 0;
}

static int __devinit pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);

device_initcall(pci_init);

#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);
#endif

EXPORT_SYMBOL_GPL(pci_restore_bars);
EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake);

/* Quirk info */

EXPORT_SYMBOL(isa_dma_bridge_buggy);
EXPORT_SYMBOL(pci_pci_problems);