]> Git Repo - linux.git/blame - drivers/pci/pci.c
Documentation: maintainer change
[linux.git] / drivers / pci / pci.c
CommitLineData
1da177e4 1/*
1da177e4
LT
2 * PCI Bus Services, see include/linux/pci.h for further explanation.
3 *
4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5 * David Mosberger-Tang
6 *
7 * Copyright 1997 -- 2000 Martin Mares <[email protected]>
8 */
9
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/pci.h>
075c1771 14#include <linux/pm.h>
5a0e3ad6 15#include <linux/slab.h>
1da177e4
LT
16#include <linux/module.h>
17#include <linux/spinlock.h>
4e57b681 18#include <linux/string.h>
229f5afd 19#include <linux/log2.h>
7d715a6c 20#include <linux/pci-aspm.h>
c300bd2f 21#include <linux/pm_wakeup.h>
8dd7f803 22#include <linux/interrupt.h>
32a9a682 23#include <linux/device.h>
b67ea761 24#include <linux/pm_runtime.h>
32a9a682 25#include <asm/setup.h>
bc56b9e0 26#include "pci.h"
1da177e4 27
00240c38
AS
28const char *pci_power_names[] = {
29 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
30};
31EXPORT_SYMBOL_GPL(pci_power_names);
32
93177a74
RW
33int isa_dma_bridge_buggy;
34EXPORT_SYMBOL(isa_dma_bridge_buggy);
35
36int pci_pci_problems;
37EXPORT_SYMBOL(pci_pci_problems);
38
1ae861e6
RW
39unsigned int pci_pm_d3_delay;
40
df17e62e
MG
41static void pci_pme_list_scan(struct work_struct *work);
42
43static LIST_HEAD(pci_pme_list);
44static DEFINE_MUTEX(pci_pme_list_mutex);
45static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
46
47struct pci_pme_device {
48 struct list_head list;
49 struct pci_dev *dev;
50};
51
52#define PME_TIMEOUT 1000 /* How long between PME checks */
53
1ae861e6
RW
54static void pci_dev_d3_sleep(struct pci_dev *dev)
55{
56 unsigned int delay = dev->d3_delay;
57
58 if (delay < pci_pm_d3_delay)
59 delay = pci_pm_d3_delay;
60
61 msleep(delay);
62}
1da177e4 63
32a2eea7
JG
64#ifdef CONFIG_PCI_DOMAINS
65int pci_domains_supported = 1;
66#endif
67
4516a618
AN
68#define DEFAULT_CARDBUS_IO_SIZE (256)
69#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
70/* pci=cbmemsize=nnM,cbiosize=nn can override this */
71unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
72unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
73
28760489
EB
74#define DEFAULT_HOTPLUG_IO_SIZE (256)
75#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
76/* pci=hpmemsize=nnM,hpiosize=nn can override this */
77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
79
5f39e670 80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
b03e7495 81
ac1aa47b
JB
82/*
83 * The default CLS is used if arch didn't set CLS explicitly and not
84 * all pci devices agree on the same value. Arch can override either
85 * the dfl or actual value as it sees fit. Don't forget this is
86 * measured in 32-bit words, not bytes.
87 */
98e724c7 88u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
ac1aa47b
JB
89u8 pci_cache_line_size;
90
96c55900
MS
91/*
92 * If we set up a device for bus mastering, we need to check the latency
93 * timer as certain BIOSes forget to set it properly.
94 */
95unsigned int pcibios_max_latency = 255;
96
6748dcc2
RW
97/* If set, the PCIe ARI capability will not be used. */
98static bool pcie_ari_disabled;
99
1da177e4
LT
100/**
101 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
102 * @bus: pointer to PCI bus structure to search
103 *
104 * Given a PCI bus, returns the highest PCI bus number present in the set
105 * including the given PCI bus and its list of child PCI buses.
106 */
96bde06a 107unsigned char pci_bus_max_busnr(struct pci_bus* bus)
1da177e4
LT
108{
109 struct list_head *tmp;
110 unsigned char max, n;
111
b82db5ce 112 max = bus->subordinate;
1da177e4
LT
113 list_for_each(tmp, &bus->children) {
114 n = pci_bus_max_busnr(pci_bus_b(tmp));
115 if(n > max)
116 max = n;
117 }
118 return max;
119}
b82db5ce 120EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
1da177e4 121
1684f5dd
AM
122#ifdef CONFIG_HAS_IOMEM
123void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
124{
125 /*
126 * Make sure the BAR is actually a memory resource, not an IO resource
127 */
128 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
129 WARN_ON(1);
130 return NULL;
131 }
132 return ioremap_nocache(pci_resource_start(pdev, bar),
133 pci_resource_len(pdev, bar));
134}
135EXPORT_SYMBOL_GPL(pci_ioremap_bar);
136#endif
137
b82db5ce 138#if 0
1da177e4
LT
139/**
140 * pci_max_busnr - returns maximum PCI bus number
141 *
142 * Returns the highest PCI bus number present in the system global list of
143 * PCI buses.
144 */
145unsigned char __devinit
146pci_max_busnr(void)
147{
148 struct pci_bus *bus = NULL;
149 unsigned char max, n;
150
151 max = 0;
152 while ((bus = pci_find_next_bus(bus)) != NULL) {
153 n = pci_bus_max_busnr(bus);
154 if(n > max)
155 max = n;
156 }
157 return max;
158}
159
54c762fe
AB
160#endif /* 0 */
161
687d5fe3
ME
162#define PCI_FIND_CAP_TTL 48
163
164static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
165 u8 pos, int cap, int *ttl)
24a4e377
RD
166{
167 u8 id;
24a4e377 168
687d5fe3 169 while ((*ttl)--) {
24a4e377
RD
170 pci_bus_read_config_byte(bus, devfn, pos, &pos);
171 if (pos < 0x40)
172 break;
173 pos &= ~3;
174 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
175 &id);
176 if (id == 0xff)
177 break;
178 if (id == cap)
179 return pos;
180 pos += PCI_CAP_LIST_NEXT;
181 }
182 return 0;
183}
184
687d5fe3
ME
185static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
186 u8 pos, int cap)
187{
188 int ttl = PCI_FIND_CAP_TTL;
189
190 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
191}
192
24a4e377
RD
193int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
194{
195 return __pci_find_next_cap(dev->bus, dev->devfn,
196 pos + PCI_CAP_LIST_NEXT, cap);
197}
198EXPORT_SYMBOL_GPL(pci_find_next_capability);
199
d3bac118
ME
200static int __pci_bus_find_cap_start(struct pci_bus *bus,
201 unsigned int devfn, u8 hdr_type)
1da177e4
LT
202{
203 u16 status;
1da177e4
LT
204
205 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
206 if (!(status & PCI_STATUS_CAP_LIST))
207 return 0;
208
209 switch (hdr_type) {
210 case PCI_HEADER_TYPE_NORMAL:
211 case PCI_HEADER_TYPE_BRIDGE:
d3bac118 212 return PCI_CAPABILITY_LIST;
1da177e4 213 case PCI_HEADER_TYPE_CARDBUS:
d3bac118 214 return PCI_CB_CAPABILITY_LIST;
1da177e4
LT
215 default:
216 return 0;
217 }
d3bac118
ME
218
219 return 0;
1da177e4
LT
220}
221
222/**
223 * pci_find_capability - query for devices' capabilities
224 * @dev: PCI device to query
225 * @cap: capability code
226 *
227 * Tell if a device supports a given PCI capability.
228 * Returns the address of the requested capability structure within the
229 * device's PCI configuration space or 0 in case the device does not
230 * support it. Possible values for @cap:
231 *
232 * %PCI_CAP_ID_PM Power Management
233 * %PCI_CAP_ID_AGP Accelerated Graphics Port
234 * %PCI_CAP_ID_VPD Vital Product Data
235 * %PCI_CAP_ID_SLOTID Slot Identification
236 * %PCI_CAP_ID_MSI Message Signalled Interrupts
237 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
238 * %PCI_CAP_ID_PCIX PCI-X
239 * %PCI_CAP_ID_EXP PCI Express
240 */
241int pci_find_capability(struct pci_dev *dev, int cap)
242{
d3bac118
ME
243 int pos;
244
245 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
246 if (pos)
247 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
248
249 return pos;
1da177e4
LT
250}
251
252/**
253 * pci_bus_find_capability - query for devices' capabilities
254 * @bus: the PCI bus to query
255 * @devfn: PCI device to query
256 * @cap: capability code
257 *
258 * Like pci_find_capability() but works for pci devices that do not have a
259 * pci_dev structure set up yet.
260 *
261 * Returns the address of the requested capability structure within the
262 * device's PCI configuration space or 0 in case the device does not
263 * support it.
264 */
265int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
266{
d3bac118 267 int pos;
1da177e4
LT
268 u8 hdr_type;
269
270 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
271
d3bac118
ME
272 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
273 if (pos)
274 pos = __pci_find_next_cap(bus, devfn, pos, cap);
275
276 return pos;
1da177e4
LT
277}
278
279/**
280 * pci_find_ext_capability - Find an extended capability
281 * @dev: PCI device to query
282 * @cap: capability code
283 *
284 * Returns the address of the requested extended capability structure
285 * within the device's PCI configuration space or 0 if the device does
286 * not support it. Possible values for @cap:
287 *
288 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
289 * %PCI_EXT_CAP_ID_VC Virtual Channel
290 * %PCI_EXT_CAP_ID_DSN Device Serial Number
291 * %PCI_EXT_CAP_ID_PWR Power Budgeting
292 */
293int pci_find_ext_capability(struct pci_dev *dev, int cap)
294{
295 u32 header;
557848c3
ZY
296 int ttl;
297 int pos = PCI_CFG_SPACE_SIZE;
1da177e4 298
557848c3
ZY
299 /* minimum 8 bytes per capability */
300 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
301
302 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
1da177e4
LT
303 return 0;
304
305 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
306 return 0;
307
308 /*
309 * If we have no capabilities, this is indicated by cap ID,
310 * cap version and next pointer all being 0.
311 */
312 if (header == 0)
313 return 0;
314
315 while (ttl-- > 0) {
316 if (PCI_EXT_CAP_ID(header) == cap)
317 return pos;
318
319 pos = PCI_EXT_CAP_NEXT(header);
557848c3 320 if (pos < PCI_CFG_SPACE_SIZE)
1da177e4
LT
321 break;
322
323 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
324 break;
325 }
326
327 return 0;
328}
3a720d72 329EXPORT_SYMBOL_GPL(pci_find_ext_capability);
1da177e4 330
cf4c43dd
JB
331/**
332 * pci_bus_find_ext_capability - find an extended capability
333 * @bus: the PCI bus to query
334 * @devfn: PCI device to query
335 * @cap: capability code
336 *
337 * Like pci_find_ext_capability() but works for pci devices that do not have a
338 * pci_dev structure set up yet.
339 *
340 * Returns the address of the requested capability structure within the
341 * device's PCI configuration space or 0 in case the device does not
342 * support it.
343 */
344int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
345 int cap)
346{
347 u32 header;
348 int ttl;
349 int pos = PCI_CFG_SPACE_SIZE;
350
351 /* minimum 8 bytes per capability */
352 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
353
354 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
355 return 0;
356 if (header == 0xffffffff || header == 0)
357 return 0;
358
359 while (ttl-- > 0) {
360 if (PCI_EXT_CAP_ID(header) == cap)
361 return pos;
362
363 pos = PCI_EXT_CAP_NEXT(header);
364 if (pos < PCI_CFG_SPACE_SIZE)
365 break;
366
367 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
368 break;
369 }
370
371 return 0;
372}
373
687d5fe3
ME
374static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
375{
376 int rc, ttl = PCI_FIND_CAP_TTL;
377 u8 cap, mask;
378
379 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
380 mask = HT_3BIT_CAP_MASK;
381 else
382 mask = HT_5BIT_CAP_MASK;
383
384 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
385 PCI_CAP_ID_HT, &ttl);
386 while (pos) {
387 rc = pci_read_config_byte(dev, pos + 3, &cap);
388 if (rc != PCIBIOS_SUCCESSFUL)
389 return 0;
390
391 if ((cap & mask) == ht_cap)
392 return pos;
393
47a4d5be
BG
394 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
395 pos + PCI_CAP_LIST_NEXT,
687d5fe3
ME
396 PCI_CAP_ID_HT, &ttl);
397 }
398
399 return 0;
400}
401/**
402 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
403 * @dev: PCI device to query
404 * @pos: Position from which to continue searching
405 * @ht_cap: Hypertransport capability code
406 *
407 * To be used in conjunction with pci_find_ht_capability() to search for
408 * all capabilities matching @ht_cap. @pos should always be a value returned
409 * from pci_find_ht_capability().
410 *
411 * NB. To be 100% safe against broken PCI devices, the caller should take
412 * steps to avoid an infinite loop.
413 */
414int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
415{
416 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
417}
418EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
419
420/**
421 * pci_find_ht_capability - query a device's Hypertransport capabilities
422 * @dev: PCI device to query
423 * @ht_cap: Hypertransport capability code
424 *
425 * Tell if a device supports a given Hypertransport capability.
426 * Returns an address within the device's PCI configuration space
427 * or 0 in case the device does not support the request capability.
428 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
429 * which has a Hypertransport capability matching @ht_cap.
430 */
431int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
432{
433 int pos;
434
435 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
436 if (pos)
437 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
438
439 return pos;
440}
441EXPORT_SYMBOL_GPL(pci_find_ht_capability);
442
1da177e4
LT
443/**
444 * pci_find_parent_resource - return resource region of parent bus of given region
445 * @dev: PCI device structure contains resources to be searched
446 * @res: child resource record for which parent is sought
447 *
448 * For given resource region of given device, return the resource
449 * region of parent bus the given region is contained in or where
450 * it should be allocated from.
451 */
452struct resource *
453pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
454{
455 const struct pci_bus *bus = dev->bus;
456 int i;
89a74ecc 457 struct resource *best = NULL, *r;
1da177e4 458
89a74ecc 459 pci_bus_for_each_resource(bus, r, i) {
1da177e4
LT
460 if (!r)
461 continue;
462 if (res->start && !(res->start >= r->start && res->end <= r->end))
463 continue; /* Not contained */
464 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
465 continue; /* Wrong type */
466 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
467 return r; /* Exact match */
8c8def26
LT
468 /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
469 if (r->flags & IORESOURCE_PREFETCH)
470 continue;
471 /* .. but we can put a prefetchable resource inside a non-prefetchable one */
472 if (!best)
473 best = r;
1da177e4
LT
474 }
475 return best;
476}
477
064b53db
JL
478/**
479 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
480 * @dev: PCI device to have its BARs restored
481 *
482 * Restore the BAR values for a given device, so as to make it
483 * accessible by its driver.
484 */
ad668599 485static void
064b53db
JL
486pci_restore_bars(struct pci_dev *dev)
487{
bc5f5a82 488 int i;
064b53db 489
bc5f5a82 490 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
14add80b 491 pci_update_resource(dev, i);
064b53db
JL
492}
493
961d9120
RW
494static struct pci_platform_pm_ops *pci_platform_pm;
495
496int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
497{
eb9d0fe4
RW
498 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
499 || !ops->sleep_wake || !ops->can_wakeup)
961d9120
RW
500 return -EINVAL;
501 pci_platform_pm = ops;
502 return 0;
503}
504
505static inline bool platform_pci_power_manageable(struct pci_dev *dev)
506{
507 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
508}
509
510static inline int platform_pci_set_power_state(struct pci_dev *dev,
511 pci_power_t t)
512{
513 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
514}
515
516static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
517{
518 return pci_platform_pm ?
519 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
520}
8f7020d3 521
eb9d0fe4
RW
522static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
523{
524 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
525}
526
527static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
528{
529 return pci_platform_pm ?
530 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
531}
532
b67ea761
RW
533static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
534{
535 return pci_platform_pm ?
536 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
537}
538
1da177e4 539/**
44e4e66e
RW
540 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
541 * given PCI device
542 * @dev: PCI device to handle.
44e4e66e 543 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1da177e4 544 *
44e4e66e
RW
545 * RETURN VALUE:
546 * -EINVAL if the requested state is invalid.
547 * -EIO if device does not support PCI PM or its PM capabilities register has a
548 * wrong version, or device doesn't support the requested state.
549 * 0 if device already is in the requested state.
550 * 0 if device's power state has been successfully changed.
1da177e4 551 */
f00a20ef 552static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
1da177e4 553{
337001b6 554 u16 pmcsr;
44e4e66e 555 bool need_restore = false;
1da177e4 556
4a865905
RW
557 /* Check if we're already there */
558 if (dev->current_state == state)
559 return 0;
560
337001b6 561 if (!dev->pm_cap)
cca03dec
AL
562 return -EIO;
563
44e4e66e
RW
564 if (state < PCI_D0 || state > PCI_D3hot)
565 return -EINVAL;
566
1da177e4
LT
567 /* Validate current state:
568 * Can enter D0 from any state, but if we can only go deeper
569 * to sleep if we're already in a low power state
570 */
4a865905 571 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
44e4e66e 572 && dev->current_state > state) {
80ccba11
BH
573 dev_err(&dev->dev, "invalid power transition "
574 "(from state %d to %d)\n", dev->current_state, state);
1da177e4 575 return -EINVAL;
44e4e66e 576 }
1da177e4 577
1da177e4 578 /* check if this device supports the desired state */
337001b6
RW
579 if ((state == PCI_D1 && !dev->d1_support)
580 || (state == PCI_D2 && !dev->d2_support))
3fe9d19f 581 return -EIO;
1da177e4 582
337001b6 583 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
064b53db 584
32a36585 585 /* If we're (effectively) in D3, force entire word to 0.
1da177e4
LT
586 * This doesn't affect PME_Status, disables PME_En, and
587 * sets PowerState to 0.
588 */
32a36585 589 switch (dev->current_state) {
d3535fbb
JL
590 case PCI_D0:
591 case PCI_D1:
592 case PCI_D2:
593 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
594 pmcsr |= state;
595 break;
f62795f1
RW
596 case PCI_D3hot:
597 case PCI_D3cold:
32a36585
JL
598 case PCI_UNKNOWN: /* Boot-up */
599 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
f00a20ef 600 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
44e4e66e 601 need_restore = true;
32a36585 602 /* Fall-through: force to D0 */
32a36585 603 default:
d3535fbb 604 pmcsr = 0;
32a36585 605 break;
1da177e4
LT
606 }
607
608 /* enter specified state */
337001b6 609 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1da177e4
LT
610
611 /* Mandatory power management transition delays */
612 /* see PCI PM 1.1 5.6.1 table 18 */
613 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
1ae861e6 614 pci_dev_d3_sleep(dev);
1da177e4 615 else if (state == PCI_D2 || dev->current_state == PCI_D2)
aa8c6c93 616 udelay(PCI_PM_D2_DELAY);
1da177e4 617
e13cdbd7
RW
618 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
619 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
620 if (dev->current_state != state && printk_ratelimit())
621 dev_info(&dev->dev, "Refused to change power state, "
622 "currently in D%d\n", dev->current_state);
064b53db
JL
623
624 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
625 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
626 * from D3hot to D0 _may_ perform an internal reset, thereby
627 * going to "D0 Uninitialized" rather than "D0 Initialized".
628 * For example, at least some versions of the 3c905B and the
629 * 3c556B exhibit this behaviour.
630 *
631 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
632 * devices in a D3hot state at boot. Consequently, we need to
633 * restore at least the BARs so that the device will be
634 * accessible to its driver.
635 */
636 if (need_restore)
637 pci_restore_bars(dev);
638
f00a20ef 639 if (dev->bus->self)
7d715a6c
SL
640 pcie_aspm_pm_state_change(dev->bus->self);
641
1da177e4
LT
642 return 0;
643}
644
44e4e66e
RW
645/**
646 * pci_update_current_state - Read PCI power state of given device from its
647 * PCI PM registers and cache it
648 * @dev: PCI device to handle.
f06fc0b6 649 * @state: State to cache in case the device doesn't have the PM capability
44e4e66e 650 */
73410429 651void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
44e4e66e 652{
337001b6 653 if (dev->pm_cap) {
44e4e66e
RW
654 u16 pmcsr;
655
337001b6 656 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
44e4e66e 657 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
f06fc0b6
RW
658 } else {
659 dev->current_state = state;
44e4e66e
RW
660 }
661}
662
0e5dd46b
RW
663/**
664 * pci_platform_power_transition - Use platform to change device power state
665 * @dev: PCI device to handle.
666 * @state: State to put the device into.
667 */
668static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
669{
670 int error;
671
672 if (platform_pci_power_manageable(dev)) {
673 error = platform_pci_set_power_state(dev, state);
674 if (!error)
675 pci_update_current_state(dev, state);
b51306c6
AH
676 /* Fall back to PCI_D0 if native PM is not supported */
677 if (!dev->pm_cap)
678 dev->current_state = PCI_D0;
0e5dd46b
RW
679 } else {
680 error = -ENODEV;
681 /* Fall back to PCI_D0 if native PM is not supported */
b3bad72e
RW
682 if (!dev->pm_cap)
683 dev->current_state = PCI_D0;
0e5dd46b
RW
684 }
685
686 return error;
687}
688
689/**
690 * __pci_start_power_transition - Start power transition of a PCI device
691 * @dev: PCI device to handle.
692 * @state: State to put the device into.
693 */
694static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
695{
696 if (state == PCI_D0)
697 pci_platform_power_transition(dev, PCI_D0);
698}
699
700/**
701 * __pci_complete_power_transition - Complete power transition of a PCI device
702 * @dev: PCI device to handle.
703 * @state: State to put the device into.
704 *
705 * This function should not be called directly by device drivers.
706 */
707int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
708{
cc2893b6 709 return state >= PCI_D0 ?
0e5dd46b
RW
710 pci_platform_power_transition(dev, state) : -EINVAL;
711}
712EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
713
44e4e66e
RW
714/**
715 * pci_set_power_state - Set the power state of a PCI device
716 * @dev: PCI device to handle.
717 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
718 *
877d0310 719 * Transition a device to a new power state, using the platform firmware and/or
44e4e66e
RW
720 * the device's PCI PM registers.
721 *
722 * RETURN VALUE:
723 * -EINVAL if the requested state is invalid.
724 * -EIO if device does not support PCI PM or its PM capabilities register has a
725 * wrong version, or device doesn't support the requested state.
726 * 0 if device already is in the requested state.
727 * 0 if device's power state has been successfully changed.
728 */
729int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
730{
337001b6 731 int error;
44e4e66e
RW
732
733 /* bound the state we're entering */
734 if (state > PCI_D3hot)
735 state = PCI_D3hot;
736 else if (state < PCI_D0)
737 state = PCI_D0;
738 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
739 /*
740 * If the device or the parent bridge do not support PCI PM,
741 * ignore the request if we're doing anything other than putting
742 * it into D0 (which would only happen on boot).
743 */
744 return 0;
745
0e5dd46b
RW
746 __pci_start_power_transition(dev, state);
747
979b1791
AC
748 /* This device is quirked not to be put into D3, so
749 don't put it in D3 */
750 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
751 return 0;
44e4e66e 752
f00a20ef 753 error = pci_raw_set_power_state(dev, state);
44e4e66e 754
0e5dd46b
RW
755 if (!__pci_complete_power_transition(dev, state))
756 error = 0;
1a680b7c
NC
757 /*
758 * When aspm_policy is "powersave" this call ensures
759 * that ASPM is configured.
760 */
761 if (!error && dev->bus->self)
762 pcie_aspm_powersave_config_link(dev->bus->self);
44e4e66e
RW
763
764 return error;
765}
766
1da177e4
LT
767/**
768 * pci_choose_state - Choose the power state of a PCI device
769 * @dev: PCI device to be suspended
770 * @state: target sleep state for the whole system. This is the value
771 * that is passed to suspend() function.
772 *
773 * Returns PCI power state suitable for given device and given system
774 * message.
775 */
776
777pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
778{
ab826ca4 779 pci_power_t ret;
0f64474b 780
1da177e4
LT
781 if (!pci_find_capability(dev, PCI_CAP_ID_PM))
782 return PCI_D0;
783
961d9120
RW
784 ret = platform_pci_choose_state(dev);
785 if (ret != PCI_POWER_ERROR)
786 return ret;
ca078bae
PM
787
788 switch (state.event) {
789 case PM_EVENT_ON:
790 return PCI_D0;
791 case PM_EVENT_FREEZE:
b887d2e6
DB
792 case PM_EVENT_PRETHAW:
793 /* REVISIT both freeze and pre-thaw "should" use D0 */
ca078bae 794 case PM_EVENT_SUSPEND:
3a2d5b70 795 case PM_EVENT_HIBERNATE:
ca078bae 796 return PCI_D3hot;
1da177e4 797 default:
80ccba11
BH
798 dev_info(&dev->dev, "unrecognized suspend event %d\n",
799 state.event);
1da177e4
LT
800 BUG();
801 }
802 return PCI_D0;
803}
804
805EXPORT_SYMBOL(pci_choose_state);
806
89858517
YZ
807#define PCI_EXP_SAVE_REGS 7
808
1b6b8ce2
YZ
809#define pcie_cap_has_devctl(type, flags) 1
810#define pcie_cap_has_lnkctl(type, flags) \
811 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
812 (type == PCI_EXP_TYPE_ROOT_PORT || \
813 type == PCI_EXP_TYPE_ENDPOINT || \
814 type == PCI_EXP_TYPE_LEG_END))
815#define pcie_cap_has_sltctl(type, flags) \
816 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
817 ((type == PCI_EXP_TYPE_ROOT_PORT) || \
818 (type == PCI_EXP_TYPE_DOWNSTREAM && \
819 (flags & PCI_EXP_FLAGS_SLOT))))
820#define pcie_cap_has_rtctl(type, flags) \
821 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
822 (type == PCI_EXP_TYPE_ROOT_PORT || \
823 type == PCI_EXP_TYPE_RC_EC))
824#define pcie_cap_has_devctl2(type, flags) \
825 ((flags & PCI_EXP_FLAGS_VERS) > 1)
826#define pcie_cap_has_lnkctl2(type, flags) \
827 ((flags & PCI_EXP_FLAGS_VERS) > 1)
828#define pcie_cap_has_sltctl2(type, flags) \
829 ((flags & PCI_EXP_FLAGS_VERS) > 1)
830
34a4876e
YL
831static struct pci_cap_saved_state *pci_find_saved_cap(
832 struct pci_dev *pci_dev, char cap)
833{
834 struct pci_cap_saved_state *tmp;
835 struct hlist_node *pos;
836
837 hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
838 if (tmp->cap.cap_nr == cap)
839 return tmp;
840 }
841 return NULL;
842}
843
b56a5a23
MT
844static int pci_save_pcie_state(struct pci_dev *dev)
845{
846 int pos, i = 0;
847 struct pci_cap_saved_state *save_state;
848 u16 *cap;
1b6b8ce2 849 u16 flags;
b56a5a23 850
06a1cbaf
KK
851 pos = pci_pcie_cap(dev);
852 if (!pos)
b56a5a23
MT
853 return 0;
854
9f35575d 855 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
b56a5a23 856 if (!save_state) {
e496b617 857 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
b56a5a23
MT
858 return -ENOMEM;
859 }
24a4742f 860 cap = (u16 *)&save_state->cap.data[0];
b56a5a23 861
1b6b8ce2
YZ
862 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
863
864 if (pcie_cap_has_devctl(dev->pcie_type, flags))
865 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
866 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
867 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
868 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
869 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
870 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
871 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
872 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
873 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
874 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
875 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
876 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
877 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
63f4898a 878
b56a5a23
MT
879 return 0;
880}
881
882static void pci_restore_pcie_state(struct pci_dev *dev)
883{
884 int i = 0, pos;
885 struct pci_cap_saved_state *save_state;
886 u16 *cap;
1b6b8ce2 887 u16 flags;
b56a5a23
MT
888
889 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
890 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
891 if (!save_state || pos <= 0)
892 return;
24a4742f 893 cap = (u16 *)&save_state->cap.data[0];
b56a5a23 894
1b6b8ce2
YZ
895 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
896
897 if (pcie_cap_has_devctl(dev->pcie_type, flags))
898 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
899 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
900 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
901 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
902 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
903 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
904 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
905 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
906 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
907 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
908 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
909 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
910 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
b56a5a23
MT
911}
912
cc692a5f
SH
913
914static int pci_save_pcix_state(struct pci_dev *dev)
915{
63f4898a 916 int pos;
cc692a5f 917 struct pci_cap_saved_state *save_state;
cc692a5f
SH
918
919 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
920 if (pos <= 0)
921 return 0;
922
f34303de 923 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
cc692a5f 924 if (!save_state) {
e496b617 925 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
cc692a5f
SH
926 return -ENOMEM;
927 }
cc692a5f 928
24a4742f
AW
929 pci_read_config_word(dev, pos + PCI_X_CMD,
930 (u16 *)save_state->cap.data);
63f4898a 931
cc692a5f
SH
932 return 0;
933}
934
935static void pci_restore_pcix_state(struct pci_dev *dev)
936{
937 int i = 0, pos;
938 struct pci_cap_saved_state *save_state;
939 u16 *cap;
940
941 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
942 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
943 if (!save_state || pos <= 0)
944 return;
24a4742f 945 cap = (u16 *)&save_state->cap.data[0];
cc692a5f
SH
946
947 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
cc692a5f
SH
948}
949
950
1da177e4
LT
951/**
952 * pci_save_state - save the PCI configuration space of a device before suspending
953 * @dev: - PCI device that we're dealing with
1da177e4
LT
954 */
955int
956pci_save_state(struct pci_dev *dev)
957{
958 int i;
959 /* XXX: 100% dword access ok here? */
960 for (i = 0; i < 16; i++)
9e0b5b2c 961 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
aa8c6c93 962 dev->state_saved = true;
b56a5a23
MT
963 if ((i = pci_save_pcie_state(dev)) != 0)
964 return i;
cc692a5f
SH
965 if ((i = pci_save_pcix_state(dev)) != 0)
966 return i;
1da177e4
LT
967 return 0;
968}
969
ebfc5b80
RW
970static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
971 u32 saved_val, int retry)
972{
973 u32 val;
974
975 pci_read_config_dword(pdev, offset, &val);
976 if (val == saved_val)
977 return;
978
979 for (;;) {
980 dev_dbg(&pdev->dev, "restoring config space at offset "
981 "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
982 pci_write_config_dword(pdev, offset, saved_val);
983 if (retry-- <= 0)
984 return;
985
986 pci_read_config_dword(pdev, offset, &val);
987 if (val == saved_val)
988 return;
989
990 mdelay(1);
991 }
992}
993
994static void pci_restore_config_space(struct pci_dev *pdev, int start, int end,
995 int retry)
996{
997 int index;
998
999 for (index = end; index >= start; index--)
1000 pci_restore_config_dword(pdev, 4 * index,
1001 pdev->saved_config_space[index],
1002 retry);
1003}
1004
1da177e4
LT
1005/**
1006 * pci_restore_state - Restore the saved state of a PCI device
1007 * @dev: - PCI device that we're dealing with
1da177e4 1008 */
1d3c16a8 1009void pci_restore_state(struct pci_dev *dev)
1da177e4 1010{
c82f63e4 1011 if (!dev->state_saved)
1d3c16a8 1012 return;
4b77b0a2 1013
b56a5a23
MT
1014 /* PCI Express register must be restored first */
1015 pci_restore_pcie_state(dev);
1900ca13 1016 pci_restore_ats_state(dev);
b56a5a23 1017
ebfc5b80 1018 pci_restore_config_space(dev, 10, 15, 0);
8b8c8d28
YL
1019 /*
1020 * The Base Address register should be programmed before the command
1021 * register(s)
1022 */
ebfc5b80
RW
1023 pci_restore_config_space(dev, 4, 9, 10);
1024 pci_restore_config_space(dev, 0, 3, 0);
1025
cc692a5f 1026 pci_restore_pcix_state(dev);
41017f0c 1027 pci_restore_msi_state(dev);
8c5cdb6a 1028 pci_restore_iov_state(dev);
8fed4b65 1029
4b77b0a2 1030 dev->state_saved = false;
1da177e4
LT
1031}
1032
ffbdd3f7
AW
1033struct pci_saved_state {
1034 u32 config_space[16];
1035 struct pci_cap_saved_data cap[0];
1036};
1037
1038/**
1039 * pci_store_saved_state - Allocate and return an opaque struct containing
1040 * the device saved state.
1041 * @dev: PCI device that we're dealing with
1042 *
1043 * Rerturn NULL if no state or error.
1044 */
1045struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1046{
1047 struct pci_saved_state *state;
1048 struct pci_cap_saved_state *tmp;
1049 struct pci_cap_saved_data *cap;
1050 struct hlist_node *pos;
1051 size_t size;
1052
1053 if (!dev->state_saved)
1054 return NULL;
1055
1056 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1057
1058 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
1059 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1060
1061 state = kzalloc(size, GFP_KERNEL);
1062 if (!state)
1063 return NULL;
1064
1065 memcpy(state->config_space, dev->saved_config_space,
1066 sizeof(state->config_space));
1067
1068 cap = state->cap;
1069 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
1070 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1071 memcpy(cap, &tmp->cap, len);
1072 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1073 }
1074 /* Empty cap_save terminates list */
1075
1076 return state;
1077}
1078EXPORT_SYMBOL_GPL(pci_store_saved_state);
1079
1080/**
1081 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1082 * @dev: PCI device that we're dealing with
1083 * @state: Saved state returned from pci_store_saved_state()
1084 */
1085int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1086{
1087 struct pci_cap_saved_data *cap;
1088
1089 dev->state_saved = false;
1090
1091 if (!state)
1092 return 0;
1093
1094 memcpy(dev->saved_config_space, state->config_space,
1095 sizeof(state->config_space));
1096
1097 cap = state->cap;
1098 while (cap->size) {
1099 struct pci_cap_saved_state *tmp;
1100
1101 tmp = pci_find_saved_cap(dev, cap->cap_nr);
1102 if (!tmp || tmp->cap.size != cap->size)
1103 return -EINVAL;
1104
1105 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1106 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1107 sizeof(struct pci_cap_saved_data) + cap->size);
1108 }
1109
1110 dev->state_saved = true;
1111 return 0;
1112}
1113EXPORT_SYMBOL_GPL(pci_load_saved_state);
1114
1115/**
1116 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1117 * and free the memory allocated for it.
1118 * @dev: PCI device that we're dealing with
1119 * @state: Pointer to saved state returned from pci_store_saved_state()
1120 */
1121int pci_load_and_free_saved_state(struct pci_dev *dev,
1122 struct pci_saved_state **state)
1123{
1124 int ret = pci_load_saved_state(dev, *state);
1125 kfree(*state);
1126 *state = NULL;
1127 return ret;
1128}
1129EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1130
38cc1302
HS
1131static int do_pci_enable_device(struct pci_dev *dev, int bars)
1132{
1133 int err;
1134
1135 err = pci_set_power_state(dev, PCI_D0);
1136 if (err < 0 && err != -EIO)
1137 return err;
1138 err = pcibios_enable_device(dev, bars);
1139 if (err < 0)
1140 return err;
1141 pci_fixup_device(pci_fixup_enable, dev);
1142
1143 return 0;
1144}
1145
1146/**
0b62e13b 1147 * pci_reenable_device - Resume abandoned device
38cc1302
HS
1148 * @dev: PCI device to be resumed
1149 *
1150 * Note this function is a backend of pci_default_resume and is not supposed
1151 * to be called by normal code, write proper resume handler and use it instead.
1152 */
0b62e13b 1153int pci_reenable_device(struct pci_dev *dev)
38cc1302 1154{
296ccb08 1155 if (pci_is_enabled(dev))
38cc1302
HS
1156 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1157 return 0;
1158}
1159
b718989d
BH
1160static int __pci_enable_device_flags(struct pci_dev *dev,
1161 resource_size_t flags)
1da177e4
LT
1162{
1163 int err;
b718989d 1164 int i, bars = 0;
1da177e4 1165
97c145f7
JB
1166 /*
1167 * Power state could be unknown at this point, either due to a fresh
1168 * boot or a device removal call. So get the current power state
1169 * so that things like MSI message writing will behave as expected
1170 * (e.g. if the device really is in D0 at enable time).
1171 */
1172 if (dev->pm_cap) {
1173 u16 pmcsr;
1174 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1175 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1176 }
1177
9fb625c3
HS
1178 if (atomic_add_return(1, &dev->enable_cnt) > 1)
1179 return 0; /* already enabled */
1180
497f16f2
YL
1181 /* only skip sriov related */
1182 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1183 if (dev->resource[i].flags & flags)
1184 bars |= (1 << i);
1185 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
b718989d
BH
1186 if (dev->resource[i].flags & flags)
1187 bars |= (1 << i);
1188
38cc1302 1189 err = do_pci_enable_device(dev, bars);
95a62965 1190 if (err < 0)
38cc1302 1191 atomic_dec(&dev->enable_cnt);
9fb625c3 1192 return err;
1da177e4
LT
1193}
1194
b718989d
BH
1195/**
1196 * pci_enable_device_io - Initialize a device for use with IO space
1197 * @dev: PCI device to be initialized
1198 *
1199 * Initialize device before it's used by a driver. Ask low-level code
1200 * to enable I/O resources. Wake up the device if it was suspended.
1201 * Beware, this function can fail.
1202 */
1203int pci_enable_device_io(struct pci_dev *dev)
1204{
1205 return __pci_enable_device_flags(dev, IORESOURCE_IO);
1206}
1207
1208/**
1209 * pci_enable_device_mem - Initialize a device for use with Memory space
1210 * @dev: PCI device to be initialized
1211 *
1212 * Initialize device before it's used by a driver. Ask low-level code
1213 * to enable Memory resources. Wake up the device if it was suspended.
1214 * Beware, this function can fail.
1215 */
1216int pci_enable_device_mem(struct pci_dev *dev)
1217{
1218 return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1219}
1220
bae94d02
IPG
1221/**
1222 * pci_enable_device - Initialize device before it's used by a driver.
1223 * @dev: PCI device to be initialized
1224 *
1225 * Initialize device before it's used by a driver. Ask low-level code
1226 * to enable I/O and memory. Wake up the device if it was suspended.
1227 * Beware, this function can fail.
1228 *
1229 * Note we don't actually enable the device many times if we call
1230 * this function repeatedly (we just increment the count).
1231 */
1232int pci_enable_device(struct pci_dev *dev)
1233{
b718989d 1234 return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
bae94d02
IPG
1235}
1236
9ac7849e
TH
1237/*
1238 * Managed PCI resources. This manages device on/off, intx/msi/msix
1239 * on/off and BAR regions. pci_dev itself records msi/msix status, so
1240 * there's no need to track it separately. pci_devres is initialized
1241 * when a device is enabled using managed PCI device enable interface.
1242 */
1243struct pci_devres {
7f375f32
TH
1244 unsigned int enabled:1;
1245 unsigned int pinned:1;
9ac7849e
TH
1246 unsigned int orig_intx:1;
1247 unsigned int restore_intx:1;
1248 u32 region_mask;
1249};
1250
1251static void pcim_release(struct device *gendev, void *res)
1252{
1253 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1254 struct pci_devres *this = res;
1255 int i;
1256
1257 if (dev->msi_enabled)
1258 pci_disable_msi(dev);
1259 if (dev->msix_enabled)
1260 pci_disable_msix(dev);
1261
1262 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1263 if (this->region_mask & (1 << i))
1264 pci_release_region(dev, i);
1265
1266 if (this->restore_intx)
1267 pci_intx(dev, this->orig_intx);
1268
7f375f32 1269 if (this->enabled && !this->pinned)
9ac7849e
TH
1270 pci_disable_device(dev);
1271}
1272
1273static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1274{
1275 struct pci_devres *dr, *new_dr;
1276
1277 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1278 if (dr)
1279 return dr;
1280
1281 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1282 if (!new_dr)
1283 return NULL;
1284 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1285}
1286
1287static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1288{
1289 if (pci_is_managed(pdev))
1290 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1291 return NULL;
1292}
1293
1294/**
1295 * pcim_enable_device - Managed pci_enable_device()
1296 * @pdev: PCI device to be initialized
1297 *
1298 * Managed pci_enable_device().
1299 */
1300int pcim_enable_device(struct pci_dev *pdev)
1301{
1302 struct pci_devres *dr;
1303 int rc;
1304
1305 dr = get_pci_dr(pdev);
1306 if (unlikely(!dr))
1307 return -ENOMEM;
b95d58ea
TH
1308 if (dr->enabled)
1309 return 0;
9ac7849e
TH
1310
1311 rc = pci_enable_device(pdev);
1312 if (!rc) {
1313 pdev->is_managed = 1;
7f375f32 1314 dr->enabled = 1;
9ac7849e
TH
1315 }
1316 return rc;
1317}
1318
1319/**
1320 * pcim_pin_device - Pin managed PCI device
1321 * @pdev: PCI device to pin
1322 *
1323 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1324 * driver detach. @pdev must have been enabled with
1325 * pcim_enable_device().
1326 */
1327void pcim_pin_device(struct pci_dev *pdev)
1328{
1329 struct pci_devres *dr;
1330
1331 dr = find_pci_dr(pdev);
7f375f32 1332 WARN_ON(!dr || !dr->enabled);
9ac7849e 1333 if (dr)
7f375f32 1334 dr->pinned = 1;
9ac7849e
TH
1335}
1336
1da177e4
LT
1337/**
1338 * pcibios_disable_device - disable arch specific PCI resources for device dev
1339 * @dev: the PCI device to disable
1340 *
1341 * Disables architecture specific PCI resources for the device. This
1342 * is the default implementation. Architecture implementations can
1343 * override this.
1344 */
1345void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1346
fa58d305
RW
1347static void do_pci_disable_device(struct pci_dev *dev)
1348{
1349 u16 pci_command;
1350
1351 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1352 if (pci_command & PCI_COMMAND_MASTER) {
1353 pci_command &= ~PCI_COMMAND_MASTER;
1354 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1355 }
1356
1357 pcibios_disable_device(dev);
1358}
1359
1360/**
1361 * pci_disable_enabled_device - Disable device without updating enable_cnt
1362 * @dev: PCI device to disable
1363 *
1364 * NOTE: This function is a backend of PCI power management routines and is
1365 * not supposed to be called drivers.
1366 */
1367void pci_disable_enabled_device(struct pci_dev *dev)
1368{
296ccb08 1369 if (pci_is_enabled(dev))
fa58d305
RW
1370 do_pci_disable_device(dev);
1371}
1372
1da177e4
LT
1373/**
1374 * pci_disable_device - Disable PCI device after use
1375 * @dev: PCI device to be disabled
1376 *
1377 * Signal to the system that the PCI device is not in use by the system
1378 * anymore. This only involves disabling PCI bus-mastering, if active.
bae94d02
IPG
1379 *
1380 * Note we don't actually disable the device until all callers of
ee6583f6 1381 * pci_enable_device() have called pci_disable_device().
1da177e4
LT
1382 */
1383void
1384pci_disable_device(struct pci_dev *dev)
1385{
9ac7849e 1386 struct pci_devres *dr;
99dc804d 1387
9ac7849e
TH
1388 dr = find_pci_dr(dev);
1389 if (dr)
7f375f32 1390 dr->enabled = 0;
9ac7849e 1391
bae94d02
IPG
1392 if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1393 return;
1394
fa58d305 1395 do_pci_disable_device(dev);
1da177e4 1396
fa58d305 1397 dev->is_busmaster = 0;
1da177e4
LT
1398}
1399
f7bdd12d
BK
1400/**
1401 * pcibios_set_pcie_reset_state - set reset state for device dev
45e829ea 1402 * @dev: the PCIe device reset
f7bdd12d
BK
1403 * @state: Reset state to enter into
1404 *
1405 *
45e829ea 1406 * Sets the PCIe reset state for the device. This is the default
f7bdd12d
BK
1407 * implementation. Architecture implementations can override this.
1408 */
1409int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1410 enum pcie_reset_state state)
1411{
1412 return -EINVAL;
1413}
1414
1415/**
1416 * pci_set_pcie_reset_state - set reset state for device dev
45e829ea 1417 * @dev: the PCIe device reset
f7bdd12d
BK
1418 * @state: Reset state to enter into
1419 *
1420 *
1421 * Sets the PCI reset state for the device.
1422 */
1423int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1424{
1425 return pcibios_set_pcie_reset_state(dev, state);
1426}
1427
58ff4633
RW
1428/**
1429 * pci_check_pme_status - Check if given device has generated PME.
1430 * @dev: Device to check.
1431 *
1432 * Check the PME status of the device and if set, clear it and clear PME enable
1433 * (if set). Return 'true' if PME status and PME enable were both set or
1434 * 'false' otherwise.
1435 */
1436bool pci_check_pme_status(struct pci_dev *dev)
1437{
1438 int pmcsr_pos;
1439 u16 pmcsr;
1440 bool ret = false;
1441
1442 if (!dev->pm_cap)
1443 return false;
1444
1445 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1446 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1447 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1448 return false;
1449
1450 /* Clear PME status. */
1451 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1452 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1453 /* Disable PME to avoid interrupt flood. */
1454 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1455 ret = true;
1456 }
1457
1458 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1459
1460 return ret;
1461}
1462
b67ea761
RW
1463/**
1464 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1465 * @dev: Device to handle.
379021d5 1466 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
b67ea761
RW
1467 *
1468 * Check if @dev has generated PME and queue a resume request for it in that
1469 * case.
1470 */
379021d5 1471static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
b67ea761 1472{
379021d5
RW
1473 if (pme_poll_reset && dev->pme_poll)
1474 dev->pme_poll = false;
1475
c125e96f 1476 if (pci_check_pme_status(dev)) {
c125e96f 1477 pci_wakeup_event(dev);
0f953bf6 1478 pm_request_resume(&dev->dev);
c125e96f 1479 }
b67ea761
RW
1480 return 0;
1481}
1482
1483/**
1484 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1485 * @bus: Top bus of the subtree to walk.
1486 */
1487void pci_pme_wakeup_bus(struct pci_bus *bus)
1488{
1489 if (bus)
379021d5 1490 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
b67ea761
RW
1491}
1492
eb9d0fe4
RW
1493/**
1494 * pci_pme_capable - check the capability of PCI device to generate PME#
1495 * @dev: PCI device to handle.
eb9d0fe4
RW
1496 * @state: PCI state from which device will issue PME#.
1497 */
e5899e1b 1498bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
eb9d0fe4 1499{
337001b6 1500 if (!dev->pm_cap)
eb9d0fe4
RW
1501 return false;
1502
337001b6 1503 return !!(dev->pme_support & (1 << state));
eb9d0fe4
RW
1504}
1505
df17e62e
MG
1506static void pci_pme_list_scan(struct work_struct *work)
1507{
379021d5 1508 struct pci_pme_device *pme_dev, *n;
df17e62e
MG
1509
1510 mutex_lock(&pci_pme_list_mutex);
1511 if (!list_empty(&pci_pme_list)) {
379021d5
RW
1512 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1513 if (pme_dev->dev->pme_poll) {
1514 pci_pme_wakeup(pme_dev->dev, NULL);
1515 } else {
1516 list_del(&pme_dev->list);
1517 kfree(pme_dev);
1518 }
1519 }
1520 if (!list_empty(&pci_pme_list))
1521 schedule_delayed_work(&pci_pme_work,
1522 msecs_to_jiffies(PME_TIMEOUT));
df17e62e
MG
1523 }
1524 mutex_unlock(&pci_pme_list_mutex);
1525}
1526
eb9d0fe4
RW
1527/**
1528 * pci_pme_active - enable or disable PCI device's PME# function
1529 * @dev: PCI device to handle.
eb9d0fe4
RW
1530 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1531 *
1532 * The caller must verify that the device is capable of generating PME# before
1533 * calling this function with @enable equal to 'true'.
1534 */
5a6c9b60 1535void pci_pme_active(struct pci_dev *dev, bool enable)
eb9d0fe4
RW
1536{
1537 u16 pmcsr;
1538
337001b6 1539 if (!dev->pm_cap)
eb9d0fe4
RW
1540 return;
1541
337001b6 1542 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
eb9d0fe4
RW
1543 /* Clear PME_Status by writing 1 to it and enable PME# */
1544 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1545 if (!enable)
1546 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1547
337001b6 1548 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
eb9d0fe4 1549
df17e62e
MG
1550 /* PCI (as opposed to PCIe) PME requires that the device have
1551 its PME# line hooked up correctly. Not all hardware vendors
1552 do this, so the PME never gets delivered and the device
1553 remains asleep. The easiest way around this is to
1554 periodically walk the list of suspended devices and check
1555 whether any have their PME flag set. The assumption is that
1556 we'll wake up often enough anyway that this won't be a huge
1557 hit, and the power savings from the devices will still be a
1558 win. */
1559
379021d5 1560 if (dev->pme_poll) {
df17e62e
MG
1561 struct pci_pme_device *pme_dev;
1562 if (enable) {
1563 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1564 GFP_KERNEL);
1565 if (!pme_dev)
1566 goto out;
1567 pme_dev->dev = dev;
1568 mutex_lock(&pci_pme_list_mutex);
1569 list_add(&pme_dev->list, &pci_pme_list);
1570 if (list_is_singular(&pci_pme_list))
1571 schedule_delayed_work(&pci_pme_work,
1572 msecs_to_jiffies(PME_TIMEOUT));
1573 mutex_unlock(&pci_pme_list_mutex);
1574 } else {
1575 mutex_lock(&pci_pme_list_mutex);
1576 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1577 if (pme_dev->dev == dev) {
1578 list_del(&pme_dev->list);
1579 kfree(pme_dev);
1580 break;
1581 }
1582 }
1583 mutex_unlock(&pci_pme_list_mutex);
1584 }
1585 }
1586
1587out:
85b8582d 1588 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
eb9d0fe4
RW
1589}
1590
1da177e4 1591/**
6cbf8214 1592 * __pci_enable_wake - enable PCI device as wakeup event source
075c1771
DB
1593 * @dev: PCI device affected
1594 * @state: PCI state from which device will issue wakeup events
6cbf8214 1595 * @runtime: True if the events are to be generated at run time
075c1771
DB
1596 * @enable: True to enable event generation; false to disable
1597 *
1598 * This enables the device as a wakeup event source, or disables it.
1599 * When such events involves platform-specific hooks, those hooks are
1600 * called automatically by this routine.
1601 *
1602 * Devices with legacy power management (no standard PCI PM capabilities)
eb9d0fe4 1603 * always require such platform hooks.
075c1771 1604 *
eb9d0fe4
RW
1605 * RETURN VALUE:
1606 * 0 is returned on success
1607 * -EINVAL is returned if device is not supposed to wake up the system
1608 * Error code depending on the platform is returned if both the platform and
1609 * the native mechanism fail to enable the generation of wake-up events
1da177e4 1610 */
6cbf8214
RW
1611int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1612 bool runtime, bool enable)
1da177e4 1613{
5bcc2fb4 1614 int ret = 0;
075c1771 1615
6cbf8214 1616 if (enable && !runtime && !device_may_wakeup(&dev->dev))
eb9d0fe4 1617 return -EINVAL;
1da177e4 1618
e80bb09d
RW
1619 /* Don't do the same thing twice in a row for one device. */
1620 if (!!enable == !!dev->wakeup_prepared)
1621 return 0;
1622
eb9d0fe4
RW
1623 /*
1624 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1625 * Anderson we should be doing PME# wake enable followed by ACPI wake
1626 * enable. To disable wake-up we call the platform first, for symmetry.
075c1771 1627 */
1da177e4 1628
5bcc2fb4
RW
1629 if (enable) {
1630 int error;
1da177e4 1631
5bcc2fb4
RW
1632 if (pci_pme_capable(dev, state))
1633 pci_pme_active(dev, true);
1634 else
1635 ret = 1;
6cbf8214
RW
1636 error = runtime ? platform_pci_run_wake(dev, true) :
1637 platform_pci_sleep_wake(dev, true);
5bcc2fb4
RW
1638 if (ret)
1639 ret = error;
e80bb09d
RW
1640 if (!ret)
1641 dev->wakeup_prepared = true;
5bcc2fb4 1642 } else {
6cbf8214
RW
1643 if (runtime)
1644 platform_pci_run_wake(dev, false);
1645 else
1646 platform_pci_sleep_wake(dev, false);
5bcc2fb4 1647 pci_pme_active(dev, false);
e80bb09d 1648 dev->wakeup_prepared = false;
5bcc2fb4 1649 }
1da177e4 1650
5bcc2fb4 1651 return ret;
eb9d0fe4 1652}
6cbf8214 1653EXPORT_SYMBOL(__pci_enable_wake);
1da177e4 1654
0235c4fc
RW
1655/**
1656 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1657 * @dev: PCI device to prepare
1658 * @enable: True to enable wake-up event generation; false to disable
1659 *
1660 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1661 * and this function allows them to set that up cleanly - pci_enable_wake()
1662 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1663 * ordering constraints.
1664 *
1665 * This function only returns error code if the device is not capable of
1666 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1667 * enable wake-up power for it.
1668 */
1669int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1670{
1671 return pci_pme_capable(dev, PCI_D3cold) ?
1672 pci_enable_wake(dev, PCI_D3cold, enable) :
1673 pci_enable_wake(dev, PCI_D3hot, enable);
1674}
1675
404cc2d8 1676/**
37139074
JB
1677 * pci_target_state - find an appropriate low power state for a given PCI dev
1678 * @dev: PCI device
1679 *
1680 * Use underlying platform code to find a supported low power state for @dev.
1681 * If the platform can't manage @dev, return the deepest state from which it
1682 * can generate wake events, based on any available PME info.
404cc2d8 1683 */
e5899e1b 1684pci_power_t pci_target_state(struct pci_dev *dev)
404cc2d8
RW
1685{
1686 pci_power_t target_state = PCI_D3hot;
404cc2d8
RW
1687
1688 if (platform_pci_power_manageable(dev)) {
1689 /*
1690 * Call the platform to choose the target state of the device
1691 * and enable wake-up from this state if supported.
1692 */
1693 pci_power_t state = platform_pci_choose_state(dev);
1694
1695 switch (state) {
1696 case PCI_POWER_ERROR:
1697 case PCI_UNKNOWN:
1698 break;
1699 case PCI_D1:
1700 case PCI_D2:
1701 if (pci_no_d1d2(dev))
1702 break;
1703 default:
1704 target_state = state;
404cc2d8 1705 }
d2abdf62
RW
1706 } else if (!dev->pm_cap) {
1707 target_state = PCI_D0;
404cc2d8
RW
1708 } else if (device_may_wakeup(&dev->dev)) {
1709 /*
1710 * Find the deepest state from which the device can generate
1711 * wake-up events, make it the target state and enable device
1712 * to generate PME#.
1713 */
337001b6
RW
1714 if (dev->pme_support) {
1715 while (target_state
1716 && !(dev->pme_support & (1 << target_state)))
1717 target_state--;
404cc2d8
RW
1718 }
1719 }
1720
e5899e1b
RW
1721 return target_state;
1722}
1723
1724/**
1725 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1726 * @dev: Device to handle.
1727 *
1728 * Choose the power state appropriate for the device depending on whether
1729 * it can wake up the system and/or is power manageable by the platform
1730 * (PCI_D3hot is the default) and put the device into that state.
1731 */
1732int pci_prepare_to_sleep(struct pci_dev *dev)
1733{
1734 pci_power_t target_state = pci_target_state(dev);
1735 int error;
1736
1737 if (target_state == PCI_POWER_ERROR)
1738 return -EIO;
1739
8efb8c76 1740 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
c157dfa3 1741
404cc2d8
RW
1742 error = pci_set_power_state(dev, target_state);
1743
1744 if (error)
1745 pci_enable_wake(dev, target_state, false);
1746
1747 return error;
1748}
1749
1750/**
443bd1c4 1751 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
404cc2d8
RW
1752 * @dev: Device to handle.
1753 *
88393161 1754 * Disable device's system wake-up capability and put it into D0.
404cc2d8
RW
1755 */
1756int pci_back_from_sleep(struct pci_dev *dev)
1757{
1758 pci_enable_wake(dev, PCI_D0, false);
1759 return pci_set_power_state(dev, PCI_D0);
1760}
1761
6cbf8214
RW
1762/**
1763 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1764 * @dev: PCI device being suspended.
1765 *
1766 * Prepare @dev to generate wake-up events at run time and put it into a low
1767 * power state.
1768 */
1769int pci_finish_runtime_suspend(struct pci_dev *dev)
1770{
1771 pci_power_t target_state = pci_target_state(dev);
1772 int error;
1773
1774 if (target_state == PCI_POWER_ERROR)
1775 return -EIO;
1776
1777 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1778
1779 error = pci_set_power_state(dev, target_state);
1780
1781 if (error)
1782 __pci_enable_wake(dev, target_state, true, false);
1783
1784 return error;
1785}
1786
b67ea761
RW
1787/**
1788 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1789 * @dev: Device to check.
1790 *
1791 * Return true if the device itself is capable of generating wake-up events
1792 * (through the platform or using the native PCIe PME) or if the device supports
1793 * PME and one of its upstream bridges can generate wake-up events.
1794 */
1795bool pci_dev_run_wake(struct pci_dev *dev)
1796{
1797 struct pci_bus *bus = dev->bus;
1798
1799 if (device_run_wake(&dev->dev))
1800 return true;
1801
1802 if (!dev->pme_support)
1803 return false;
1804
1805 while (bus->parent) {
1806 struct pci_dev *bridge = bus->self;
1807
1808 if (device_run_wake(&bridge->dev))
1809 return true;
1810
1811 bus = bus->parent;
1812 }
1813
1814 /* We have reached the root bus. */
1815 if (bus->bridge)
1816 return device_run_wake(bus->bridge);
1817
1818 return false;
1819}
1820EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1821
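/*
 * Usage sketch (illustrative, not part of pci.c): a driver may consult
 * pci_dev_run_wake() at probe time to decide whether runtime suspend with
 * remote wake-up is worth allowing.  foo_probe_pm_setup() is a hypothetical
 * helper name used only for this example.
 */
#if 0
static void foo_probe_pm_setup(struct pci_dev *pdev)
{
	/* Only let the device runtime-suspend if it can wake us back up. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);
}
#endif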
eb9d0fe4
RW
1822/**
1823 * pci_pm_init - Initialize PM functions of given PCI device
1824 * @dev: PCI device to handle.
1825 */
1826void pci_pm_init(struct pci_dev *dev)
1827{
1828 int pm;
1829 u16 pmc;
1da177e4 1830
bb910a70 1831 pm_runtime_forbid(&dev->dev);
a1e4d72c 1832 device_enable_async_suspend(&dev->dev);
e80bb09d 1833 dev->wakeup_prepared = false;
bb910a70 1834
337001b6
RW
1835 dev->pm_cap = 0;
1836
eb9d0fe4
RW
1837 /* find PCI PM capability in list */
1838 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1839 if (!pm)
50246dd4 1840 return;
eb9d0fe4
RW
1841 /* Check device's ability to generate PME# */
1842 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
075c1771 1843
eb9d0fe4
RW
1844 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1845 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1846 pmc & PCI_PM_CAP_VER_MASK);
50246dd4 1847 return;
eb9d0fe4
RW
1848 }
1849
337001b6 1850 dev->pm_cap = pm;
1ae861e6 1851 dev->d3_delay = PCI_PM_D3_WAIT;
337001b6
RW
1852
1853 dev->d1_support = false;
1854 dev->d2_support = false;
1855 if (!pci_no_d1d2(dev)) {
c9ed77ee 1856 if (pmc & PCI_PM_CAP_D1)
337001b6 1857 dev->d1_support = true;
c9ed77ee 1858 if (pmc & PCI_PM_CAP_D2)
337001b6 1859 dev->d2_support = true;
c9ed77ee
BH
1860
1861 if (dev->d1_support || dev->d2_support)
1862 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
ec84f126
JB
1863 dev->d1_support ? " D1" : "",
1864 dev->d2_support ? " D2" : "");
337001b6
RW
1865 }
1866
1867 pmc &= PCI_PM_CAP_PME_MASK;
1868 if (pmc) {
10c3d71d
BH
1869 dev_printk(KERN_DEBUG, &dev->dev,
1870 "PME# supported from%s%s%s%s%s\n",
c9ed77ee
BH
1871 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1872 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1873 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1874 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1875 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
337001b6 1876 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
379021d5 1877 dev->pme_poll = true;
eb9d0fe4
RW
1878 /*
1879 * Make device's PM flags reflect the wake-up capability, but
1880 * let the user space enable it to wake up the system as needed.
1881 */
1882 device_set_wakeup_capable(&dev->dev, true);
eb9d0fe4 1883 /* Disable the PME# generation functionality */
337001b6
RW
1884 pci_pme_active(dev, false);
1885 } else {
1886 dev->pme_support = 0;
eb9d0fe4 1887 }
1da177e4
LT
1888}
1889
eb9c39d0
JB
1890/**
1891 * platform_pci_wakeup_init - init platform wakeup if present
1892 * @dev: PCI device
1893 *
1894 * Some devices don't have PCI PM caps but can still generate wakeup
1895 * events through platform methods (like ACPI events). If @dev supports
1896 * platform wakeup events, set the device flag to indicate as much. This
1897 * may be redundant if the device also supports PCI PM caps, but double
1898 * initialization should be safe in that case.
1899 */
1900void platform_pci_wakeup_init(struct pci_dev *dev)
1901{
1902 if (!platform_pci_can_wakeup(dev))
1903 return;
1904
1905 device_set_wakeup_capable(&dev->dev, true);
eb9c39d0
JB
1906 platform_pci_sleep_wake(dev, false);
1907}
1908
34a4876e
YL
1909static void pci_add_saved_cap(struct pci_dev *pci_dev,
1910 struct pci_cap_saved_state *new_cap)
1911{
1912 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1913}
1914
63f4898a
RW
1915/**
1916 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1917 * @dev: the PCI device
1918 * @cap: the capability to allocate the buffer for
1919 * @size: requested size of the buffer
1920 */
1921static int pci_add_cap_save_buffer(
1922 struct pci_dev *dev, char cap, unsigned int size)
1923{
1924 int pos;
1925 struct pci_cap_saved_state *save_state;
1926
1927 pos = pci_find_capability(dev, cap);
1928 if (pos <= 0)
1929 return 0;
1930
1931 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1932 if (!save_state)
1933 return -ENOMEM;
1934
24a4742f
AW
1935 save_state->cap.cap_nr = cap;
1936 save_state->cap.size = size;
63f4898a
RW
1937 pci_add_saved_cap(dev, save_state);
1938
1939 return 0;
1940}
1941
1942/**
1943 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1944 * @dev: the PCI device
1945 */
1946void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1947{
1948 int error;
1949
89858517
YZ
1950 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1951 PCI_EXP_SAVE_REGS * sizeof(u16));
63f4898a
RW
1952 if (error)
1953 dev_err(&dev->dev,
1954 "unable to preallocate PCI Express save buffer\n");
1955
1956 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1957 if (error)
1958 dev_err(&dev->dev,
1959 "unable to preallocate PCI-X save buffer\n");
1960}
1961
f796841e
YL
1962void pci_free_cap_save_buffers(struct pci_dev *dev)
1963{
1964 struct pci_cap_saved_state *tmp;
1965 struct hlist_node *pos, *n;
1966
1967 hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
1968 kfree(tmp);
1969}
1970
58c3a727
YZ
1971/**
1972 * pci_enable_ari - enable ARI forwarding if hardware supports it
1973 * @dev: the PCI device
1974 */
1975void pci_enable_ari(struct pci_dev *dev)
1976{
1977 int pos;
1978 u32 cap;
864d296c 1979 u16 flags, ctrl;
8113587c 1980 struct pci_dev *bridge;
58c3a727 1981
6748dcc2 1982 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
58c3a727
YZ
1983 return;
1984
8113587c
ZY
1985 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1986 if (!pos)
58c3a727
YZ
1987 return;
1988
8113587c 1989 bridge = dev->bus->self;
5f4d91a1 1990 if (!bridge || !pci_is_pcie(bridge))
8113587c
ZY
1991 return;
1992
06a1cbaf 1993 pos = pci_pcie_cap(bridge);
58c3a727
YZ
1994 if (!pos)
1995 return;
1996
864d296c
CW
1997 /* ARI is a PCIe v2 feature */
1998 pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
1999 if ((flags & PCI_EXP_FLAGS_VERS) < 2)
2000 return;
2001
8113587c 2002 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
58c3a727
YZ
2003 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2004 return;
2005
8113587c 2006 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
58c3a727 2007 ctrl |= PCI_EXP_DEVCTL2_ARI;
8113587c 2008 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
58c3a727 2009
8113587c 2010 bridge->ari_enabled = 1;
58c3a727
YZ
2011}
2012
b48d4425
JB
2013/**
2014 * pci_enable_ido - enable ID-based ordering on a device
2015 * @dev: the PCI device
2016 * @type: which types of IDO to enable
2017 *
2018 * Enable ID-based ordering on @dev. @type can contain the bits
2019 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2020 * which types of transactions are allowed to be re-ordered.
2021 */
2022void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2023{
2024 int pos;
2025 u16 ctrl;
2026
2027 pos = pci_pcie_cap(dev);
2028 if (!pos)
2029 return;
2030
2031 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2032 if (type & PCI_EXP_IDO_REQUEST)
2033 ctrl |= PCI_EXP_IDO_REQ_EN;
2034 if (type & PCI_EXP_IDO_COMPLETION)
2035 ctrl |= PCI_EXP_IDO_CMP_EN;
2036 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2037}
2038EXPORT_SYMBOL(pci_enable_ido);
2039
2040/**
2041 * pci_disable_ido - disable ID-based ordering on a device
2042 * @dev: the PCI device
2043 * @type: which types of IDO to disable
2044 */
2045void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2046{
2047 int pos;
2048 u16 ctrl;
2049
2050 if (!pci_is_pcie(dev))
2051 return;
2052
2053 pos = pci_pcie_cap(dev);
2054 if (!pos)
2055 return;
2056
2057 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2058 if (type & PCI_EXP_IDO_REQUEST)
2059 ctrl &= ~PCI_EXP_IDO_REQ_EN;
2060 if (type & PCI_EXP_IDO_COMPLETION)
2061 ctrl &= ~PCI_EXP_IDO_CMP_EN;
2062 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2063}
2064EXPORT_SYMBOL(pci_disable_ido);
2065
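/*
 * Usage sketch (illustrative, not part of pci.c): a driver whose DMA streams
 * are independent of each other might opt in to ID-based ordering for both
 * requests and completions during probe, and undo it on remove.  The foo_*
 * names are hypothetical.
 */
#if 0
static void foo_setup_ido(struct pci_dev *pdev)
{
	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}

static void foo_teardown_ido(struct pci_dev *pdev)
{
	pci_disable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}
#endif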
48a92a81
JB
2066/**
2067 * pci_enable_obff - enable optimized buffer flush/fill
2068 * @dev: PCI device
2069 * @type: type of signaling to use
2070 *
2071 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
2072 * signaling if possible, falling back to message signaling only if
2073 * WAKE# isn't supported. @type should indicate whether the PCIe link should
2074 * be brought out of L0s or L1 to send the message. It should be either
2075 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2076 *
2077 * If your device can benefit from receiving all messages, even at the
2078 * power cost of bringing the link back up from a low power state, use
2079 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2080 * preferred type).
2081 *
2082 * RETURNS:
2083 * Zero on success, appropriate error number on failure.
2084 */
2085int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2086{
2087 int pos;
2088 u32 cap;
2089 u16 ctrl;
2090 int ret;
2091
2092 if (!pci_is_pcie(dev))
2093 return -ENOTSUPP;
2094
2095 pos = pci_pcie_cap(dev);
2096 if (!pos)
2097 return -ENOTSUPP;
2098
2099 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2100 if (!(cap & PCI_EXP_OBFF_MASK))
2101 return -ENOTSUPP; /* no OBFF support at all */
2102
2103 /* Make sure the topology supports OBFF as well */
2104 if (dev->bus) {
2105 ret = pci_enable_obff(dev->bus->self, type);
2106 if (ret)
2107 return ret;
2108 }
2109
2110 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2111 if (cap & PCI_EXP_OBFF_WAKE)
2112 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2113 else {
2114 switch (type) {
2115 case PCI_EXP_OBFF_SIGNAL_L0:
2116 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2117 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2118 break;
2119 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2120 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2121 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2122 break;
2123 default:
2124 WARN(1, "bad OBFF signal type\n");
2125 return -ENOTSUPP;
2126 }
2127 }
2128 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2129
2130 return 0;
2131}
2132EXPORT_SYMBOL(pci_enable_obff);
2133
2134/**
2135 * pci_disable_obff - disable optimized buffer flush/fill
2136 * @dev: PCI device
2137 *
2138 * Disable OBFF on @dev.
2139 */
2140void pci_disable_obff(struct pci_dev *dev)
2141{
2142 int pos;
2143 u16 ctrl;
2144
2145 if (!pci_is_pcie(dev))
2146 return;
2147
2148 pos = pci_pcie_cap(dev);
2149 if (!pos)
2150 return;
2151
2152 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2153 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2154 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2155}
2156EXPORT_SYMBOL(pci_disable_obff);
2157
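/*
 * Usage sketch (illustrative, not part of pci.c): enabling OBFF with the
 * power-friendly L0 signal type and degrading gracefully when the capability
 * is absent somewhere in the topology.  foo_enable_obff() is a hypothetical
 * name.
 */
#if 0
static void foo_enable_obff(struct pci_dev *pdev)
{
	int err = pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0);

	if (err)
		dev_info(&pdev->dev, "OBFF not available: %d\n", err);
}
#endif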
51c2e0a7
JB
2158/**
2159 * pci_ltr_supported - check whether a device supports LTR
2160 * @dev: PCI device
2161 *
2162 * RETURNS:
2163 * True if @dev supports latency tolerance reporting, false otherwise.
2164 */
2165bool pci_ltr_supported(struct pci_dev *dev)
2166{
2167 int pos;
2168 u32 cap;
2169
2170 if (!pci_is_pcie(dev))
2171 return false;
2172
2173 pos = pci_pcie_cap(dev);
2174 if (!pos)
2175 return false;
2176
2177 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2178
2179 return cap & PCI_EXP_DEVCAP2_LTR;
2180}
2181EXPORT_SYMBOL(pci_ltr_supported);
2182
2183/**
2184 * pci_enable_ltr - enable latency tolerance reporting
2185 * @dev: PCI device
2186 *
2187 * Enable LTR on @dev if possible, which means enabling it first on
2188 * upstream ports.
2189 *
2190 * RETURNS:
2191 * Zero on success, errno on failure.
2192 */
2193int pci_enable_ltr(struct pci_dev *dev)
2194{
2195 int pos;
2196 u16 ctrl;
2197 int ret;
2198
2199 if (!pci_ltr_supported(dev))
2200 return -ENOTSUPP;
2201
2202 pos = pci_pcie_cap(dev);
2203 if (!pos)
2204 return -ENOTSUPP;
2205
2206 /* Only primary function can enable/disable LTR */
2207 if (PCI_FUNC(dev->devfn) != 0)
2208 return -EINVAL;
2209
2210 /* Enable upstream ports first */
2211 if (dev->bus) {
2212 ret = pci_enable_ltr(dev->bus->self);
2213 if (ret)
2214 return ret;
2215 }
2216
2217 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2218 ctrl |= PCI_EXP_LTR_EN;
2219 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2220
2221 return 0;
2222}
2223EXPORT_SYMBOL(pci_enable_ltr);
2224
2225/**
2226 * pci_disable_ltr - disable latency tolerance reporting
2227 * @dev: PCI device
2228 */
2229void pci_disable_ltr(struct pci_dev *dev)
2230{
2231 int pos;
2232 u16 ctrl;
2233
2234 if (!pci_ltr_supported(dev))
2235 return;
2236
2237 pos = pci_pcie_cap(dev);
2238 if (!pos)
2239 return;
2240
2241 /* Only primary function can enable/disable LTR */
2242 if (PCI_FUNC(dev->devfn) != 0)
2243 return;
2244
2245 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2246 ctrl &= ~PCI_EXP_LTR_EN;
2247 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2248}
2249EXPORT_SYMBOL(pci_disable_ltr);
2250
2251static int __pci_ltr_scale(int *val)
2252{
2253 int scale = 0;
2254
2255 while (*val > 1023) {
2256 *val = (*val + 31) / 32;
2257 scale++;
2258 }
2259 return scale;
2260}
2261
2262/**
2263 * pci_set_ltr - set LTR latency values
2264 * @dev: PCI device
2265 * @snoop_lat_ns: snoop latency in nanoseconds
2266 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2267 *
2268 * Figure out the scale and set the LTR values accordingly.
2269 */
2270int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2271{
2272 int pos, ret, snoop_scale, nosnoop_scale;
2273 u16 val;
2274
2275 if (!pci_ltr_supported(dev))
2276 return -ENOTSUPP;
2277
2278 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2279 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2280
2281 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2282 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2283 return -EINVAL;
2284
2285 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2286 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2287 return -EINVAL;
2288
2289 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2290 if (!pos)
2291 return -ENOTSUPP;
2292
2293 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2294 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2295 if (ret != 4)
2296 return -EIO;
2297
2298 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2299 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2300 if (ret != 4)
2301 return -EIO;
2302
2303 return 0;
2304}
2305EXPORT_SYMBOL(pci_set_ltr);
2306
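/*
 * Usage sketch (illustrative, not part of pci.c): enabling LTR and then
 * reporting example tolerances of 1 ms snoop / 4 ms no-snoop latency.  The
 * latency figures and the foo_* name are made up for the sketch.
 */
#if 0
static void foo_setup_ltr(struct pci_dev *pdev)
{
	if (!pci_ltr_supported(pdev))
		return;

	/* LTR must be enabled upstream first; pci_enable_ltr() handles that. */
	if (pci_enable_ltr(pdev) == 0)
		pci_set_ltr(pdev, 1 * 1000 * 1000, 4 * 1000 * 1000);
}
#endif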
5d990b62
CW
2307static int pci_acs_enable;
2308
2309/**
2310 * pci_request_acs - ask for ACS to be enabled if supported
2311 */
2312void pci_request_acs(void)
2313{
2314 pci_acs_enable = 1;
2315}
2316
ae21ee65
AK
2317/**
2318 * pci_enable_acs - enable ACS if hardware supports it
2319 * @dev: the PCI device
2320 */
2321void pci_enable_acs(struct pci_dev *dev)
2322{
2323 int pos;
2324 u16 cap;
2325 u16 ctrl;
2326
5d990b62
CW
2327 if (!pci_acs_enable)
2328 return;
2329
5f4d91a1 2330 if (!pci_is_pcie(dev))
ae21ee65
AK
2331 return;
2332
2333 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2334 if (!pos)
2335 return;
2336
2337 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2338 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2339
2340 /* Source Validation */
2341 ctrl |= (cap & PCI_ACS_SV);
2342
2343 /* P2P Request Redirect */
2344 ctrl |= (cap & PCI_ACS_RR);
2345
2346 /* P2P Completion Redirect */
2347 ctrl |= (cap & PCI_ACS_CR);
2348
2349 /* Upstream Forwarding */
2350 ctrl |= (cap & PCI_ACS_UF);
2351
2352 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2353}
2354
57c2cf71
BH
2355/**
2356 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2357 * @dev: the PCI device
2358 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2359 *
2360 * Perform INTx swizzling for a device behind one level of bridge. This is
2361 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
46b952a3
MW
2362 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2363 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2364 * the PCI Express Base Specification, Revision 2.1)
57c2cf71
BH
2365 */
2366u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2367{
46b952a3
MW
2368 int slot;
2369
2370 if (pci_ari_enabled(dev->bus))
2371 slot = 0;
2372 else
2373 slot = PCI_SLOT(dev->devfn);
2374
2375 return (((pin - 1) + slot) % 4) + 1;
57c2cf71
BH
2376}
2377
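/*
 * Worked example for the swizzle above (illustrative only): a device in
 * slot 3 of an add-in card asserting INTB (pin 2) appears at the upstream
 * bridge as ((2 - 1) + 3) % 4 + 1 = 1, i.e. INTA.
 */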
1da177e4
LT
2378int
2379pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2380{
2381 u8 pin;
2382
514d207d 2383 pin = dev->pin;
1da177e4
LT
2384 if (!pin)
2385 return -1;
878f2e50 2386
8784fd4d 2387 while (!pci_is_root_bus(dev->bus)) {
57c2cf71 2388 pin = pci_swizzle_interrupt_pin(dev, pin);
1da177e4
LT
2389 dev = dev->bus->self;
2390 }
2391 *bridge = dev;
2392 return pin;
2393}
2394
68feac87
BH
2395/**
2396 * pci_common_swizzle - swizzle INTx all the way to root bridge
2397 * @dev: the PCI device
2398 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2399 *
2400 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2401 * bridges all the way up to a PCI root bus.
2402 */
2403u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2404{
2405 u8 pin = *pinp;
2406
1eb39487 2407 while (!pci_is_root_bus(dev->bus)) {
68feac87
BH
2408 pin = pci_swizzle_interrupt_pin(dev, pin);
2409 dev = dev->bus->self;
2410 }
2411 *pinp = pin;
2412 return PCI_SLOT(dev->devfn);
2413}
2414
1da177e4
LT
2415/**
2416 * pci_release_region - Release a PCI BAR
2417 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2418 * @bar: BAR to release
2419 *
2420 * Releases the PCI I/O and memory resources previously reserved by a
2421 * successful call to pci_request_region. Call this function only
2422 * after all use of the PCI regions has ceased.
2423 */
2424void pci_release_region(struct pci_dev *pdev, int bar)
2425{
9ac7849e
TH
2426 struct pci_devres *dr;
2427
1da177e4
LT
2428 if (pci_resource_len(pdev, bar) == 0)
2429 return;
2430 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2431 release_region(pci_resource_start(pdev, bar),
2432 pci_resource_len(pdev, bar));
2433 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2434 release_mem_region(pci_resource_start(pdev, bar),
2435 pci_resource_len(pdev, bar));
9ac7849e
TH
2436
2437 dr = find_pci_dr(pdev);
2438 if (dr)
2439 dr->region_mask &= ~(1 << bar);
1da177e4
LT
2440}
2441
2442/**
f5ddcac4 2443 * __pci_request_region - Reserve PCI I/O and memory resource
1da177e4
LT
2444 * @pdev: PCI device whose resources are to be reserved
2445 * @bar: BAR to be reserved
2446 * @res_name: Name to be associated with resource.
f5ddcac4 2447 * @exclusive: whether the region access is exclusive or not
1da177e4
LT
2448 *
2449 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2450 * being reserved by owner @res_name. Do not access any
2451 * address inside the PCI regions unless this call returns
2452 * successfully.
2453 *
f5ddcac4
RD
2454 * If @exclusive is set, then the region is marked so that userspace
2455 * is explicitly not allowed to map the resource via /dev/mem or
2456 * sysfs MMIO access.
2457 *
1da177e4
LT
2458 * Returns 0 on success, or %EBUSY on error. A warning
2459 * message is also printed on failure.
2460 */
e8de1481
AV
2461static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2462 int exclusive)
1da177e4 2463{
9ac7849e
TH
2464 struct pci_devres *dr;
2465
1da177e4
LT
2466 if (pci_resource_len(pdev, bar) == 0)
2467 return 0;
2468
2469 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2470 if (!request_region(pci_resource_start(pdev, bar),
2471 pci_resource_len(pdev, bar), res_name))
2472 goto err_out;
2473 }
2474 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
e8de1481
AV
2475 if (!__request_mem_region(pci_resource_start(pdev, bar),
2476 pci_resource_len(pdev, bar), res_name,
2477 exclusive))
1da177e4
LT
2478 goto err_out;
2479 }
9ac7849e
TH
2480
2481 dr = find_pci_dr(pdev);
2482 if (dr)
2483 dr->region_mask |= 1 << bar;
2484
1da177e4
LT
2485 return 0;
2486
2487err_out:
c7dabef8 2488 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
096e6f67 2489 &pdev->resource[bar]);
1da177e4
LT
2490 return -EBUSY;
2491}
2492
e8de1481 2493/**
f5ddcac4 2494 * pci_request_region - Reserve PCI I/O and memory resource
e8de1481
AV
2495 * @pdev: PCI device whose resources are to be reserved
2496 * @bar: BAR to be reserved
f5ddcac4 2497 * @res_name: Name to be associated with resource
e8de1481 2498 *
f5ddcac4 2499 * Mark the PCI region associated with PCI device @pdev BAR @bar as
e8de1481
AV
2500 * being reserved by owner @res_name. Do not access any
2501 * address inside the PCI regions unless this call returns
2502 * successfully.
2503 *
2504 * Returns 0 on success, or %EBUSY on error. A warning
2505 * message is also printed on failure.
2506 */
2507int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2508{
2509 return __pci_request_region(pdev, bar, res_name, 0);
2510}
2511
2512/**
2513 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2514 * @pdev: PCI device whose resources are to be reserved
2515 * @bar: BAR to be reserved
2516 * @res_name: Name to be associated with resource.
2517 *
2518 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2519 * being reserved by owner @res_name. Do not access any
2520 * address inside the PCI regions unless this call returns
2521 * successfully.
2522 *
2523 * Returns 0 on success, or %EBUSY on error. A warning
2524 * message is also printed on failure.
2525 *
2526 * The key difference that _exclusive makes is that userspace is
2527 * explicitly not allowed to map the resource via /dev/mem or
2528 * sysfs.
2529 */
2530int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2531{
2532 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2533}
c87deff7
HS
2534/**
2535 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2536 * @pdev: PCI device whose resources were previously reserved
2537 * @bars: Bitmask of BARs to be released
2538 *
2539 * Release selected PCI I/O and memory resources previously reserved.
2540 * Call this function only after all use of the PCI regions has ceased.
2541 */
2542void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2543{
2544 int i;
2545
2546 for (i = 0; i < 6; i++)
2547 if (bars & (1 << i))
2548 pci_release_region(pdev, i);
2549}
2550
e8de1481
AV
2551int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2552 const char *res_name, int excl)
c87deff7
HS
2553{
2554 int i;
2555
2556 for (i = 0; i < 6; i++)
2557 if (bars & (1 << i))
e8de1481 2558 if (__pci_request_region(pdev, i, res_name, excl))
c87deff7
HS
2559 goto err_out;
2560 return 0;
2561
2562err_out:
2563 while(--i >= 0)
2564 if (bars & (1 << i))
2565 pci_release_region(pdev, i);
2566
2567 return -EBUSY;
2568}
1da177e4 2569
e8de1481
AV
2570
2571/**
2572 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2573 * @pdev: PCI device whose resources are to be reserved
2574 * @bars: Bitmask of BARs to be requested
2575 * @res_name: Name to be associated with resource
2576 */
2577int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2578 const char *res_name)
2579{
2580 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2581}
2582
2583int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2584 int bars, const char *res_name)
2585{
2586 return __pci_request_selected_regions(pdev, bars, res_name,
2587 IORESOURCE_EXCLUSIVE);
2588}
2589
1da177e4
LT
2590/**
2591 * pci_release_regions - Release reserved PCI I/O and memory resources
2592 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2593 *
2594 * Releases all PCI I/O and memory resources previously reserved by a
2595 * successful call to pci_request_regions. Call this function only
2596 * after all use of the PCI regions has ceased.
2597 */
2598
2599void pci_release_regions(struct pci_dev *pdev)
2600{
c87deff7 2601 pci_release_selected_regions(pdev, (1 << 6) - 1);
1da177e4
LT
2602}
2603
2604/**
2605 * pci_request_regions - Reserve PCI I/O and memory resources
2606 * @pdev: PCI device whose resources are to be reserved
2607 * @res_name: Name to be associated with resource.
2608 *
2609 * Mark all PCI regions associated with PCI device @pdev as
2610 * being reserved by owner @res_name. Do not access any
2611 * address inside the PCI regions unless this call returns
2612 * successfully.
2613 *
2614 * Returns 0 on success, or %EBUSY on error. A warning
2615 * message is also printed on failure.
2616 */
3c990e92 2617int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1da177e4 2618{
c87deff7 2619 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1da177e4
LT
2620}
2621
e8de1481
AV
2622/**
2623 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2624 * @pdev: PCI device whose resources are to be reserved
2625 * @res_name: Name to be associated with resource.
2626 *
2627 * Mark all PCI regions associated with PCI device @pdev as
2628 * being reserved by owner @res_name. Do not access any
2629 * address inside the PCI regions unless this call returns
2630 * successfully.
2631 *
2632 * pci_request_regions_exclusive() will mark the region so that
2633 * /dev/mem and the sysfs MMIO access will not be allowed.
2634 *
2635 * Returns 0 on success, or %EBUSY on error. A warning
2636 * message is also printed on failure.
2637 */
2638int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2639{
2640 return pci_request_selected_regions_exclusive(pdev,
2641 ((1 << 6) - 1), res_name);
2642}
2643
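/*
 * Usage sketch (illustrative, not part of pci.c): the classic probe sequence
 * built on the enable/request/release helpers above.  The foo_* names and
 * the choice of BAR 0 are assumptions made only for this example.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "foo");
	if (err)
		goto err_disable;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		err = -ENOMEM;
		goto err_release;
	}

	pci_set_drvdata(pdev, regs);
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return err;
}
#endif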
6a479079
BH
2644static void __pci_set_master(struct pci_dev *dev, bool enable)
2645{
2646 u16 old_cmd, cmd;
2647
2648 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2649 if (enable)
2650 cmd = old_cmd | PCI_COMMAND_MASTER;
2651 else
2652 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2653 if (cmd != old_cmd) {
2654 dev_dbg(&dev->dev, "%s bus mastering\n",
2655 enable ? "enabling" : "disabling");
2656 pci_write_config_word(dev, PCI_COMMAND, cmd);
2657 }
2658 dev->is_busmaster = enable;
2659}
e8de1481 2660
96c55900
MS
2661/**
2662 * pcibios_set_master - enable PCI bus-mastering for device dev
2663 * @dev: the PCI device to enable
2664 *
2665 * Enables PCI bus-mastering for the device. This is the default
2666 * implementation. Architecture specific implementations can override
2667 * this if necessary.
2668 */
2669void __weak pcibios_set_master(struct pci_dev *dev)
2670{
2671 u8 lat;
2672
f676678f
MS
2673 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2674 if (pci_is_pcie(dev))
2675 return;
2676
96c55900
MS
2677 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2678 if (lat < 16)
2679 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2680 else if (lat > pcibios_max_latency)
2681 lat = pcibios_max_latency;
2682 else
2683 return;
2684 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2685 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2686}
2687
1da177e4
LT
2688/**
2689 * pci_set_master - enables bus-mastering for device dev
2690 * @dev: the PCI device to enable
2691 *
2692 * Enables bus-mastering on the device and calls pcibios_set_master()
2693 * to do the needed arch specific settings.
2694 */
6a479079 2695void pci_set_master(struct pci_dev *dev)
1da177e4 2696{
6a479079 2697 __pci_set_master(dev, true);
1da177e4
LT
2698 pcibios_set_master(dev);
2699}
2700
6a479079
BH
2701/**
2702 * pci_clear_master - disables bus-mastering for device dev
2703 * @dev: the PCI device to disable
2704 */
2705void pci_clear_master(struct pci_dev *dev)
2706{
2707 __pci_set_master(dev, false);
2708}
2709
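/*
 * Usage sketch (illustrative, not part of pci.c): a DMA-capable driver
 * enables bus mastering once the device is enabled, and clears it again
 * before the device goes away.  foo_* names are hypothetical.
 */
#if 0
static void foo_start_dma(struct pci_dev *pdev)
{
	pci_set_master(pdev);	/* sets PCI_COMMAND_MASTER, arch latency timer */
}

static void foo_stop_dma(struct pci_dev *pdev)
{
	pci_clear_master(pdev);	/* device must not DMA after this point */
}
#endif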
1da177e4 2710/**
edb2d97e
MW
2711 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2712 * @dev: the PCI device for which MWI is to be enabled
1da177e4 2713 *
edb2d97e
MW
2714 * Helper function for pci_set_mwi.
2715 * Originally copied from drivers/net/acenic.c.
1da177e4
LT
2716 * Copyright 1998-2001 by Jes Sorensen, <[email protected]>.
2717 *
2718 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2719 */
15ea76d4 2720int pci_set_cacheline_size(struct pci_dev *dev)
1da177e4
LT
2721{
2722 u8 cacheline_size;
2723
2724 if (!pci_cache_line_size)
15ea76d4 2725 return -EINVAL;
1da177e4
LT
2726
2727 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2728 equal to or multiple of the right value. */
2729 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2730 if (cacheline_size >= pci_cache_line_size &&
2731 (cacheline_size % pci_cache_line_size) == 0)
2732 return 0;
2733
2734 /* Write the correct value. */
2735 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2736 /* Read it back. */
2737 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2738 if (cacheline_size == pci_cache_line_size)
2739 return 0;
2740
80ccba11
BH
2741 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2742 "supported\n", pci_cache_line_size << 2);
1da177e4
LT
2743
2744 return -EINVAL;
2745}
15ea76d4
TH
2746EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2747
2748#ifdef PCI_DISABLE_MWI
2749int pci_set_mwi(struct pci_dev *dev)
2750{
2751 return 0;
2752}
2753
2754int pci_try_set_mwi(struct pci_dev *dev)
2755{
2756 return 0;
2757}
2758
2759void pci_clear_mwi(struct pci_dev *dev)
2760{
2761}
2762
2763#else
1da177e4
LT
2764
2765/**
2766 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2767 * @dev: the PCI device for which MWI is enabled
2768 *
694625c0 2769 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1da177e4
LT
2770 *
2771 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2772 */
2773int
2774pci_set_mwi(struct pci_dev *dev)
2775{
2776 int rc;
2777 u16 cmd;
2778
edb2d97e 2779 rc = pci_set_cacheline_size(dev);
1da177e4
LT
2780 if (rc)
2781 return rc;
2782
2783 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2784 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
80ccba11 2785 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1da177e4
LT
2786 cmd |= PCI_COMMAND_INVALIDATE;
2787 pci_write_config_word(dev, PCI_COMMAND, cmd);
2788 }
2789
2790 return 0;
2791}
2792
694625c0
RD
2793/**
2794 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2795 * @dev: the PCI device for which MWI is enabled
2796 *
2797 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2798 * Callers are not required to check the return value.
2799 *
2800 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2801 */
2802int pci_try_set_mwi(struct pci_dev *dev)
2803{
2804 int rc = pci_set_mwi(dev);
2805 return rc;
2806}
2807
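/*
 * Usage sketch (illustrative, not part of pci.c): MWI is purely an
 * optimization, so most callers use pci_try_set_mwi() and ignore the result
 * rather than failing probe over it.  foo_tune_mwi() is a hypothetical name.
 */
#if 0
static void foo_tune_mwi(struct pci_dev *pdev)
{
	pci_try_set_mwi(pdev);	/* best effort; harmless if unsupported */
}
#endif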
1da177e4
LT
2808/**
2809 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2810 * @dev: the PCI device to disable
2811 *
2812 * Disables PCI Memory-Write-Invalidate transaction on the device
2813 */
2814void
2815pci_clear_mwi(struct pci_dev *dev)
2816{
2817 u16 cmd;
2818
2819 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2820 if (cmd & PCI_COMMAND_INVALIDATE) {
2821 cmd &= ~PCI_COMMAND_INVALIDATE;
2822 pci_write_config_word(dev, PCI_COMMAND, cmd);
2823 }
2824}
edb2d97e 2825#endif /* ! PCI_DISABLE_MWI */
1da177e4 2826
a04ce0ff
BR
2827/**
2828 * pci_intx - enables/disables PCI INTx for device dev
8f7020d3
RD
2829 * @pdev: the PCI device to operate on
2830 * @enable: boolean: whether to enable or disable PCI INTx
a04ce0ff
BR
2831 *
2832 * Enables/disables PCI INTx for device dev
2833 */
2834void
2835pci_intx(struct pci_dev *pdev, int enable)
2836{
2837 u16 pci_command, new;
2838
2839 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2840
2841 if (enable) {
2842 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2843 } else {
2844 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2845 }
2846
2847 if (new != pci_command) {
9ac7849e
TH
2848 struct pci_devres *dr;
2849
2fd9d74b 2850 pci_write_config_word(pdev, PCI_COMMAND, new);
9ac7849e
TH
2851
2852 dr = find_pci_dr(pdev);
2853 if (dr && !dr->restore_intx) {
2854 dr->restore_intx = 1;
2855 dr->orig_intx = !enable;
2856 }
a04ce0ff
BR
2857 }
2858}
2859
a2e27787
JK
2860/**
2861 * pci_intx_mask_supported - probe for INTx masking support
6e9292c5 2862 * @dev: the PCI device to operate on
a2e27787
JK
2863 *
2864 * Check if the device dev supports INTx masking via the config space
2865 * command word.
2866 */
2867bool pci_intx_mask_supported(struct pci_dev *dev)
2868{
2869 bool mask_supported = false;
2870 u16 orig, new;
2871
2872 pci_cfg_access_lock(dev);
2873
2874 pci_read_config_word(dev, PCI_COMMAND, &orig);
2875 pci_write_config_word(dev, PCI_COMMAND,
2876 orig ^ PCI_COMMAND_INTX_DISABLE);
2877 pci_read_config_word(dev, PCI_COMMAND, &new);
2878
2879 /*
2880 * There's no way to protect against hardware bugs or detect them
2881 * reliably, but as long as we know what the value should be, let's
2882 * go ahead and check it.
2883 */
2884 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2885 dev_err(&dev->dev, "Command register changed from "
2886 "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2887 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2888 mask_supported = true;
2889 pci_write_config_word(dev, PCI_COMMAND, orig);
2890 }
2891
2892 pci_cfg_access_unlock(dev);
2893 return mask_supported;
2894}
2895EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2896
2897static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2898{
2899 struct pci_bus *bus = dev->bus;
2900 bool mask_updated = true;
2901 u32 cmd_status_dword;
2902 u16 origcmd, newcmd;
2903 unsigned long flags;
2904 bool irq_pending;
2905
2906 /*
2907 * We do a single dword read to retrieve both command and status.
2908 * Document assumptions that make this possible.
2909 */
2910 BUILD_BUG_ON(PCI_COMMAND % 4);
2911 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2912
2913 raw_spin_lock_irqsave(&pci_lock, flags);
2914
2915 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2916
2917 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2918
2919 /*
2920 * Check interrupt status register to see whether our device
2921 * triggered the interrupt (when masking) or the next IRQ is
2922 * already pending (when unmasking).
2923 */
2924 if (mask != irq_pending) {
2925 mask_updated = false;
2926 goto done;
2927 }
2928
2929 origcmd = cmd_status_dword;
2930 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2931 if (mask)
2932 newcmd |= PCI_COMMAND_INTX_DISABLE;
2933 if (newcmd != origcmd)
2934 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2935
2936done:
2937 raw_spin_unlock_irqrestore(&pci_lock, flags);
2938
2939 return mask_updated;
2940}
2941
2942/**
2943 * pci_check_and_mask_intx - mask INTx on pending interrupt
6e9292c5 2944 * @dev: the PCI device to operate on
a2e27787
JK
2945 *
2946 * Check if the device dev has its INTx line asserted, mask it and
2947 * return true in that case. False is returned if no interrupt was
2948 * pending.
2949 */
2950bool pci_check_and_mask_intx(struct pci_dev *dev)
2951{
2952 return pci_check_and_set_intx_mask(dev, true);
2953}
2954EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2955
2956/**
2957 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
6e9292c5 2958 * @dev: the PCI device to operate on
a2e27787
JK
2959 *
2960 * Check if the device dev has its INTx line asserted, unmask it if not
2961 * and return true. False is returned and the mask remains active if
2962 * there was still an interrupt pending.
2963 */
2964bool pci_check_and_unmask_intx(struct pci_dev *dev)
2965{
2966 return pci_check_and_set_intx_mask(dev, false);
2967}
2968EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
2969
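/*
 * Usage sketch (illustrative, not part of pci.c): a driver for a non-MSI
 * device on a shared interrupt line can use the helpers above to mask INTx
 * in the hard IRQ handler and unmask it again from a threaded handler.
 * The foo_* names are assumptions for the example.
 */
#if 0
static irqreturn_t foo_hardirq(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* Only claim the interrupt if our device actually asserted it. */
	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* ... service the device here ... */

	pci_check_and_unmask_intx(pdev);
	return IRQ_HANDLED;
}
#endif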
f5f2b131
EB
2970/**
2971 * pci_msi_off - disables any msi or msix capabilities
8d7d86e9 2972 * @dev: the PCI device to operate on
f5f2b131
EB
2973 *
2974 * If you want to use msi see pci_enable_msi and friends.
2975 * This is a lower level primitive that allows us to disable
2976 * msi operation at the device level.
2977 */
2978void pci_msi_off(struct pci_dev *dev)
2979{
2980 int pos;
2981 u16 control;
2982
2983 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2984 if (pos) {
2985 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2986 control &= ~PCI_MSI_FLAGS_ENABLE;
2987 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2988 }
2989 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2990 if (pos) {
2991 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2992 control &= ~PCI_MSIX_FLAGS_ENABLE;
2993 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
2994 }
2995}
b03214d5 2996EXPORT_SYMBOL_GPL(pci_msi_off);
f5f2b131 2997
4d57cdfa
FT
2998int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
2999{
3000 return dma_set_max_seg_size(&dev->dev, size);
3001}
3002EXPORT_SYMBOL(pci_set_dma_max_seg_size);
4d57cdfa 3003
59fc67de
FT
3004int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3005{
3006 return dma_set_seg_boundary(&dev->dev, mask);
3007}
3008EXPORT_SYMBOL(pci_set_dma_seg_boundary);
59fc67de 3009
8c1c699f 3010static int pcie_flr(struct pci_dev *dev, int probe)
8dd7f803 3011{
8c1c699f
YZ
3012 int i;
3013 int pos;
8dd7f803 3014 u32 cap;
04b55c47 3015 u16 status, control;
8dd7f803 3016
06a1cbaf 3017 pos = pci_pcie_cap(dev);
8c1c699f 3018 if (!pos)
8dd7f803 3019 return -ENOTTY;
8c1c699f
YZ
3020
3021 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
8dd7f803
SY
3022 if (!(cap & PCI_EXP_DEVCAP_FLR))
3023 return -ENOTTY;
3024
d91cdc74
SY
3025 if (probe)
3026 return 0;
3027
8dd7f803 3028 /* Wait for Transaction Pending bit clean */
8c1c699f
YZ
3029 for (i = 0; i < 4; i++) {
3030 if (i)
3031 msleep((1 << (i - 1)) * 100);
5fe5db05 3032
8c1c699f
YZ
3033 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3034 if (!(status & PCI_EXP_DEVSTA_TRPND))
3035 goto clear;
3036 }
3037
3038 dev_err(&dev->dev, "transaction is not cleared; "
3039 "proceeding with reset anyway\n");
3040
3041clear:
04b55c47
SR
3042 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3043 control |= PCI_EXP_DEVCTL_BCR_FLR;
3044 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3045
8c1c699f 3046 msleep(100);
8dd7f803 3047
8dd7f803
SY
3048 return 0;
3049}
d91cdc74 3050
8c1c699f 3051static int pci_af_flr(struct pci_dev *dev, int probe)
1ca88797 3052{
8c1c699f
YZ
3053 int i;
3054 int pos;
1ca88797 3055 u8 cap;
8c1c699f 3056 u8 status;
1ca88797 3057
8c1c699f
YZ
3058 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3059 if (!pos)
1ca88797 3060 return -ENOTTY;
8c1c699f
YZ
3061
3062 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
1ca88797
SY
3063 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3064 return -ENOTTY;
3065
3066 if (probe)
3067 return 0;
3068
1ca88797 3069 /* Wait for Transaction Pending bit clean */
8c1c699f
YZ
3070 for (i = 0; i < 4; i++) {
3071 if (i)
3072 msleep((1 << (i - 1)) * 100);
3073
3074 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3075 if (!(status & PCI_AF_STATUS_TP))
3076 goto clear;
3077 }
5fe5db05 3078
8c1c699f
YZ
3079 dev_err(&dev->dev, "transaction is not cleared; "
3080 "proceeding with reset anyway\n");
5fe5db05 3081
8c1c699f
YZ
3082clear:
3083 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
1ca88797 3084 msleep(100);
8c1c699f 3085
1ca88797
SY
3086 return 0;
3087}
3088
83d74e03
RW
3089/**
3090 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3091 * @dev: Device to reset.
3092 * @probe: If set, only check if the device can be reset this way.
3093 *
3094 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3095 * unset, it will be reinitialized internally when going from PCI_D3hot to
3096 * PCI_D0. If that's the case and the device is not in a low-power state
3097 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3098 *
3099 * NOTE: This causes the caller to sleep for twice the device power transition
3100 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3101 * by default (i.e. unless the @dev's d3_delay field has a different value).
3102 * Moreover, only devices in D0 can be reset by this function.
3103 */
f85876ba 3104static int pci_pm_reset(struct pci_dev *dev, int probe)
d91cdc74 3105{
f85876ba
YZ
3106 u16 csr;
3107
3108 if (!dev->pm_cap)
3109 return -ENOTTY;
d91cdc74 3110
f85876ba
YZ
3111 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3112 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3113 return -ENOTTY;
d91cdc74 3114
f85876ba
YZ
3115 if (probe)
3116 return 0;
1ca88797 3117
f85876ba
YZ
3118 if (dev->current_state != PCI_D0)
3119 return -EINVAL;
3120
3121 csr &= ~PCI_PM_CTRL_STATE_MASK;
3122 csr |= PCI_D3hot;
3123 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3124 pci_dev_d3_sleep(dev);
f85876ba
YZ
3125
3126 csr &= ~PCI_PM_CTRL_STATE_MASK;
3127 csr |= PCI_D0;
3128 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3129 pci_dev_d3_sleep(dev);
f85876ba
YZ
3130
3131 return 0;
3132}
3133
c12ff1df
YZ
3134static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3135{
3136 u16 ctrl;
3137 struct pci_dev *pdev;
3138
654b75e0 3139 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
c12ff1df
YZ
3140 return -ENOTTY;
3141
3142 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3143 if (pdev != dev)
3144 return -ENOTTY;
3145
3146 if (probe)
3147 return 0;
3148
3149 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3150 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3151 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3152 msleep(100);
3153
3154 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3155 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3156 msleep(100);
3157
3158 return 0;
3159}
3160
8c1c699f 3161static int pci_dev_reset(struct pci_dev *dev, int probe)
d91cdc74 3162{
8c1c699f
YZ
3163 int rc;
3164
3165 might_sleep();
3166
3167 if (!probe) {
fb51ccbf 3168 pci_cfg_access_lock(dev);
8c1c699f 3169 /* block PM suspend, driver probe, etc. */
8e9394ce 3170 device_lock(&dev->dev);
8c1c699f 3171 }
d91cdc74 3172
b9c3b266
DC
3173 rc = pci_dev_specific_reset(dev, probe);
3174 if (rc != -ENOTTY)
3175 goto done;
3176
8c1c699f
YZ
3177 rc = pcie_flr(dev, probe);
3178 if (rc != -ENOTTY)
3179 goto done;
d91cdc74 3180
8c1c699f 3181 rc = pci_af_flr(dev, probe);
f85876ba
YZ
3182 if (rc != -ENOTTY)
3183 goto done;
3184
3185 rc = pci_pm_reset(dev, probe);
c12ff1df
YZ
3186 if (rc != -ENOTTY)
3187 goto done;
3188
3189 rc = pci_parent_bus_reset(dev, probe);
8c1c699f
YZ
3190done:
3191 if (!probe) {
8e9394ce 3192 device_unlock(&dev->dev);
fb51ccbf 3193 pci_cfg_access_unlock(dev);
8c1c699f 3194 }
1ca88797 3195
8c1c699f 3196 return rc;
d91cdc74
SY
3197}
3198
3199/**
8c1c699f
YZ
3200 * __pci_reset_function - reset a PCI device function
3201 * @dev: PCI device to reset
d91cdc74
SY
3202 *
3203 * Some devices allow an individual function to be reset without affecting
3204 * other functions in the same device. The PCI device must be responsive
3205 * to PCI config space in order to use this function.
3206 *
3207 * The device function is presumed to be unused when this function is called.
3208 * Resetting the device will make the contents of PCI configuration space
3209 * random, so any caller of this must be prepared to reinitialise the
3210 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3211 * etc.
3212 *
8c1c699f 3213 * Returns 0 if the device function was successfully reset or negative if the
d91cdc74
SY
3214 * device doesn't support resetting a single function.
3215 */
8c1c699f 3216int __pci_reset_function(struct pci_dev *dev)
d91cdc74 3217{
8c1c699f 3218 return pci_dev_reset(dev, 0);
d91cdc74 3219}
8c1c699f 3220EXPORT_SYMBOL_GPL(__pci_reset_function);
8dd7f803 3221
6fbf9e7a
KRW
3222/**
3223 * __pci_reset_function_locked - reset a PCI device function while holding
3224 * the @dev mutex lock.
3225 * @dev: PCI device to reset
3226 *
3227 * Some devices allow an individual function to be reset without affecting
3228 * other functions in the same device. The PCI device must be responsive
3229 * to PCI config space in order to use this function.
3230 *
3231 * The device function is presumed to be unused and the caller is holding
3232 * the device mutex lock when this function is called.
3233 * Resetting the device will make the contents of PCI configuration space
3234 * random, so any caller of this must be prepared to reinitialise the
3235 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3236 * etc.
3237 *
3238 * Returns 0 if the device function was successfully reset or negative if the
3239 * device doesn't support resetting a single function.
3240 */
3241int __pci_reset_function_locked(struct pci_dev *dev)
3242{
3243 return pci_dev_reset(dev, 1);
3244}
3245EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3246
711d5779
MT
3247/**
3248 * pci_probe_reset_function - check whether the device can be safely reset
3249 * @dev: PCI device to reset
3250 *
3251 * Some devices allow an individual function to be reset without affecting
3252 * other functions in the same device. The PCI device must be responsive
3253 * to PCI config space in order to use this function.
3254 *
3255 * Returns 0 if the device function can be reset or negative if the
3256 * device doesn't support resetting a single function.
3257 */
3258int pci_probe_reset_function(struct pci_dev *dev)
3259{
3260 return pci_dev_reset(dev, 1);
3261}
3262
8dd7f803 3263/**
8c1c699f
YZ
3264 * pci_reset_function - quiesce and reset a PCI device function
3265 * @dev: PCI device to reset
8dd7f803
SY
3266 *
3267 * Some devices allow an individual function to be reset without affecting
3268 * other functions in the same device. The PCI device must be responsive
3269 * to PCI config space in order to use this function.
3270 *
3271 * This function does not just reset the PCI portion of a device, but
3272 * clears all the state associated with the device. This function differs
8c1c699f 3273 * from __pci_reset_function in that it saves and restores device state
8dd7f803
SY
3274 * over the reset.
3275 *
8c1c699f 3276 * Returns 0 if the device function was successfully reset or negative if the
8dd7f803
SY
3277 * device doesn't support resetting a single function.
3278 */
3279int pci_reset_function(struct pci_dev *dev)
3280{
8c1c699f 3281 int rc;
8dd7f803 3282
8c1c699f
YZ
3283 rc = pci_dev_reset(dev, 1);
3284 if (rc)
3285 return rc;
8dd7f803 3286
8dd7f803
SY
3287 pci_save_state(dev);
3288
8c1c699f
YZ
3289 /*
3290 * both INTx and MSI are disabled after the Interrupt Disable bit
3291 * is set and the Bus Master bit is cleared.
3292 */
8dd7f803
SY
3293 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3294
8c1c699f 3295 rc = pci_dev_reset(dev, 0);
8dd7f803
SY
3296
3297 pci_restore_state(dev);
8dd7f803 3298
8c1c699f 3299 return rc;
8dd7f803
SY
3300}
3301EXPORT_SYMBOL_GPL(pci_reset_function);
3302
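/*
 * Usage sketch (illustrative, not part of pci.c): resetting a quiesced
 * function, e.g. before handing it to a guest.  pci_reset_function() saves
 * and restores config state around the reset, so the caller only needs to
 * make sure no driver is actively using the device.  The foo_* name is a
 * hypothetical helper.
 */
#if 0
static int foo_reset_for_passthrough(struct pci_dev *pdev)
{
	int rc = pci_reset_function(pdev);

	if (rc)
		dev_warn(&pdev->dev, "function reset not supported: %d\n", rc);
	return rc;
}
#endif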
d556ad4b
PO
3303/**
3304 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3305 * @dev: PCI device to query
3306 *
3307 * Returns mmrbc: maximum designed memory read count in bytes
3308 * or appropriate error value.
3309 */
3310int pcix_get_max_mmrbc(struct pci_dev *dev)
3311{
7c9e2b1c 3312 int cap;
d556ad4b
PO
3313 u32 stat;
3314
3315 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3316 if (!cap)
3317 return -EINVAL;
3318
7c9e2b1c 3319 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
d556ad4b
PO
3320 return -EINVAL;
3321
25daeb55 3322 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
d556ad4b
PO
3323}
3324EXPORT_SYMBOL(pcix_get_max_mmrbc);
3325
3326/**
3327 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3328 * @dev: PCI device to query
3329 *
3330 * Returns mmrbc: maximum memory read count in bytes
3331 * or appropriate error value.
3332 */
3333int pcix_get_mmrbc(struct pci_dev *dev)
3334{
7c9e2b1c 3335 int cap;
bdc2bda7 3336 u16 cmd;
d556ad4b
PO
3337
3338 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3339 if (!cap)
3340 return -EINVAL;
3341
7c9e2b1c
DN
3342 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3343 return -EINVAL;
d556ad4b 3344
7c9e2b1c 3345 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
d556ad4b
PO
3346}
3347EXPORT_SYMBOL(pcix_get_mmrbc);
3348
3349/**
3350 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3351 * @dev: PCI device to query
3352 * @mmrbc: maximum memory read count in bytes
3353 * valid values are 512, 1024, 2048, 4096
3354 *
3355 * If possible sets maximum memory read byte count, some bridges have errata
3356 * that prevent this.
3357 */
3358int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3359{
7c9e2b1c 3360 int cap;
bdc2bda7
DN
3361 u32 stat, v, o;
3362 u16 cmd;
d556ad4b 3363
229f5afd 3364 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
7c9e2b1c 3365 return -EINVAL;
d556ad4b
PO
3366
3367 v = ffs(mmrbc) - 10;
3368
3369 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3370 if (!cap)
7c9e2b1c 3371 return -EINVAL;
d556ad4b 3372
7c9e2b1c
DN
3373 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3374 return -EINVAL;
d556ad4b
PO
3375
3376 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3377 return -E2BIG;
3378
7c9e2b1c
DN
3379 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3380 return -EINVAL;
d556ad4b
PO
3381
3382 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3383 if (o != v) {
3384 if (v > o && dev->bus &&
3385 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3386 return -EIO;
3387
3388 cmd &= ~PCI_X_CMD_MAX_READ;
3389 cmd |= v << 2;
7c9e2b1c
DN
3390 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3391 return -EIO;
d556ad4b 3392 }
7c9e2b1c 3393 return 0;
d556ad4b
PO
3394}
3395EXPORT_SYMBOL(pcix_set_mmrbc);
3396
3397/**
3398 * pcie_get_readrq - get PCI Express read request size
3399 * @dev: PCI device to query
3400 *
3401 * Returns maximum memory read request in bytes
3402 * or appropriate error value.
3403 */
3404int pcie_get_readrq(struct pci_dev *dev)
3405{
3406 int ret, cap;
3407 u16 ctl;
3408
06a1cbaf 3409 cap = pci_pcie_cap(dev);
d556ad4b
PO
3410 if (!cap)
3411 return -EINVAL;
3412
3413 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3414 if (!ret)
93e75fab 3415 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
d556ad4b
PO
3416
3417 return ret;
3418}
3419EXPORT_SYMBOL(pcie_get_readrq);
3420
3421/**
3422 * pcie_set_readrq - set PCI Express maximum memory read request
3423 * @dev: PCI device to query
42e61f4a 3424 * @rq: maximum memory read count in bytes
d556ad4b
PO
3425 * valid values are 128, 256, 512, 1024, 2048, 4096
3426 *
c9b378c7 3427 * If possible sets maximum memory read request in bytes
d556ad4b
PO
3428 */
3429int pcie_set_readrq(struct pci_dev *dev, int rq)
3430{
3431 int cap, err = -EINVAL;
3432 u16 ctl, v;
3433
229f5afd 3434 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
d556ad4b
PO
3435 goto out;
3436
06a1cbaf 3437 cap = pci_pcie_cap(dev);
d556ad4b
PO
3438 if (!cap)
3439 goto out;
3440
3441 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3442 if (err)
3443 goto out;
a1c473aa
BH
3444 /*
3445 * If using the "performance" PCIe config, we clamp the
3446 * read rq size to the max packet size to prevent the
3447 * host bridge generating requests larger than we can
3448 * cope with
3449 */
3450 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3451 int mps = pcie_get_mps(dev);
3452
3453 if (mps < 0)
3454 return mps;
3455 if (mps < rq)
3456 rq = mps;
3457 }
3458
3459 v = (ffs(rq) - 8) << 12;
d556ad4b
PO
3460
3461 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3462 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3463 ctl |= v;
c9b378c7 3464 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
d556ad4b
PO
3465 }
3466
3467out:
3468 return err;
3469}
3470EXPORT_SYMBOL(pcie_set_readrq);
3471
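/*
 * Usage sketch (illustrative, not part of pci.c): a driver that issues large
 * DMA reads may bump the Max_Read_Request_Size at probe time.  512 is an
 * arbitrary example value; it must be a power of two in the 128..4096 range.
 */
#if 0
static void foo_tune_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) < 512)
		pcie_set_readrq(pdev, 512);
}
#endif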
b03e7495
JM
3472/**
3473 * pcie_get_mps - get PCI Express maximum payload size
3474 * @dev: PCI device to query
3475 *
3476 * Returns maximum payload size in bytes
3477 * or appropriate error value.
3478 */
3479int pcie_get_mps(struct pci_dev *dev)
3480{
3481 int ret, cap;
3482 u16 ctl;
3483
3484 cap = pci_pcie_cap(dev);
3485 if (!cap)
3486 return -EINVAL;
3487
3488 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3489 if (!ret)
3490 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3491
3492 return ret;
3493}
3494
3495/**
3496 * pcie_set_mps - set PCI Express maximum payload size
3497 * @dev: PCI device to query
47c08f31 3498 * @mps: maximum payload size in bytes
b03e7495
JM
3499 * valid values are 128, 256, 512, 1024, 2048, 4096
3500 *
3501 * If possible sets maximum payload size
3502 */
3503int pcie_set_mps(struct pci_dev *dev, int mps)
3504{
3505 int cap, err = -EINVAL;
3506 u16 ctl, v;
3507
3508 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3509 goto out;
3510
3511 v = ffs(mps) - 8;
3512 if (v > dev->pcie_mpss)
3513 goto out;
3514 v <<= 5;
3515
3516 cap = pci_pcie_cap(dev);
3517 if (!cap)
3518 goto out;
3519
3520 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3521 if (err)
3522 goto out;
3523
3524 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3525 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3526 ctl |= v;
3527 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3528 }
3529out:
3530 return err;
3531}
3532
c87deff7
HS
3533/**
3534 * pci_select_bars - Make BAR mask from the type of resource
f95d882d 3535 * @dev: the PCI device for which BAR mask is made
c87deff7
HS
3536 * @flags: resource type mask to be selected
3537 *
3538 * This helper routine makes bar mask from the type of resource.
3539 */
3540int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3541{
3542 int i, bars = 0;
3543 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3544 if (pci_resource_flags(dev, i) & flags)
3545 bars |= (1 << i);
3546 return bars;
3547}
3548
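/*
 * Usage sketch (illustrative, not part of pci.c): requesting only the memory
 * BARs of a device, e.g. when the I/O ports are owned by another driver.
 * "foo" is a hypothetical resource name.
 */
#if 0
static int foo_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "foo");
}
#endif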
613e7ed6
YZ
3549/**
3550 * pci_resource_bar - get position of the BAR associated with a resource
3551 * @dev: the PCI device
3552 * @resno: the resource number
3553 * @type: the BAR type to be filled in
3554 *
3555 * Returns BAR position in config space, or 0 if the BAR is invalid.
3556 */
3557int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3558{
d1b054da
YZ
3559 int reg;
3560
613e7ed6
YZ
3561 if (resno < PCI_ROM_RESOURCE) {
3562 *type = pci_bar_unknown;
3563 return PCI_BASE_ADDRESS_0 + 4 * resno;
3564 } else if (resno == PCI_ROM_RESOURCE) {
3565 *type = pci_bar_mem32;
3566 return dev->rom_base_reg;
d1b054da
YZ
3567 } else if (resno < PCI_BRIDGE_RESOURCES) {
3568 /* device specific resource */
3569 reg = pci_iov_resource_bar(dev, resno, type);
3570 if (reg)
3571 return reg;
613e7ed6
YZ
3572 }
3573
865df576 3574 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
613e7ed6
YZ
3575 return 0;
3576}
3577
95a8b6ef
MT
3578/* Some architectures require additional programming to enable VGA */
3579static arch_set_vga_state_t arch_set_vga_state;
3580
3581void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3582{
3583 arch_set_vga_state = func; /* NULL disables */
3584}
3585
3586static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
7ad35cf2 3587 unsigned int command_bits, u32 flags)
95a8b6ef
MT
3588{
3589 if (arch_set_vga_state)
3590 return arch_set_vga_state(dev, decode, command_bits,
7ad35cf2 3591 flags);
95a8b6ef
MT
3592 return 0;
3593}
3594
deb2d2ec
BH
3595/**
3596 * pci_set_vga_state - set VGA decode state on device and parents if requested
19eea630
RD
3597 * @dev: the PCI device
3598 * @decode: true = enable decoding, false = disable decoding
3599 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3f37d622 3600 * @flags: PCI_VGA_STATE_CHANGE_DECODES to update @dev's command bits,
3448a19d 3601 * PCI_VGA_STATE_CHANGE_BRIDGE to also traverse ancestors and change bridges
deb2d2ec
BH
3602 */
3603int pci_set_vga_state(struct pci_dev *dev, bool decode,
3448a19d 3604 unsigned int command_bits, u32 flags)
deb2d2ec
BH
3605{
3606 struct pci_bus *bus;
3607 struct pci_dev *bridge;
3608 u16 cmd;
95a8b6ef 3609 int rc;
deb2d2ec 3610
3448a19d 3611	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
deb2d2ec 3612
95a8b6ef 3613 /* ARCH specific VGA enables */
3448a19d 3614 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
95a8b6ef
MT
3615 if (rc)
3616 return rc;
3617
3448a19d
DA
3618 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3619 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3620 if (decode == true)
3621 cmd |= command_bits;
3622 else
3623 cmd &= ~command_bits;
3624 pci_write_config_word(dev, PCI_COMMAND, cmd);
3625 }
deb2d2ec 3626
3448a19d 3627 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
deb2d2ec
BH
3628 return 0;
3629
3630 bus = dev->bus;
3631 while (bus) {
3632 bridge = bus->self;
3633 if (bridge) {
3634 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3635 &cmd);
3636 if (decode == true)
3637 cmd |= PCI_BRIDGE_CTL_VGA;
3638 else
3639 cmd &= ~PCI_BRIDGE_CTL_VGA;
3640 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3641 cmd);
3642 }
3643 bus = bus->parent;
3644 }
3645 return 0;
3646}
3647
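/*
 * Illustrative call (editor's sketch; foo_route_vga_to() is hypothetical):
 * enable legacy VGA I/O and memory decoding on a device and also set the
 * VGA-enable bit in each parent bridge, much as the VGA arbiter does when
 * routing legacy accesses.
 */
#include <linux/pci.h>

static int foo_route_vga_to(struct pci_dev *pdev)
{
	return pci_set_vga_state(pdev, true,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 PCI_VGA_STATE_CHANGE_DECODES |
				 PCI_VGA_STATE_CHANGE_BRIDGE);
}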
32a9a682
YS
3648#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3649static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
e9d1e492 3650static DEFINE_SPINLOCK(resource_alignment_lock);
32a9a682
YS
3651
3652/**
3653 * pci_specified_resource_alignment - get resource alignment specified by user.
 3654 * @dev: the PCI device to check
3655 *
3656 * RETURNS: Resource alignment if it is specified.
3657 * Zero if it is not specified.
3658 */
3659resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3660{
3661 int seg, bus, slot, func, align_order, count;
3662 resource_size_t align = 0;
3663 char *p;
3664
3665 spin_lock(&resource_alignment_lock);
3666 p = resource_alignment_param;
3667 while (*p) {
3668 count = 0;
3669 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3670 p[count] == '@') {
3671 p += count + 1;
3672 } else {
3673 align_order = -1;
3674 }
3675 if (sscanf(p, "%x:%x:%x.%x%n",
3676 &seg, &bus, &slot, &func, &count) != 4) {
3677 seg = 0;
3678 if (sscanf(p, "%x:%x.%x%n",
3679 &bus, &slot, &func, &count) != 3) {
3680 /* Invalid format */
3681 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3682 p);
3683 break;
3684 }
3685 }
3686 p += count;
3687 if (seg == pci_domain_nr(dev->bus) &&
3688 bus == dev->bus->number &&
3689 slot == PCI_SLOT(dev->devfn) &&
3690 func == PCI_FUNC(dev->devfn)) {
3691 if (align_order == -1) {
3692 align = PAGE_SIZE;
3693 } else {
3694 align = 1 << align_order;
3695 }
3696 /* Found */
3697 break;
3698 }
3699 if (*p != ';' && *p != ',') {
3700 /* End of param or invalid format */
3701 break;
3702 }
3703 p++;
3704 }
3705 spin_unlock(&resource_alignment_lock);
3706 return align;
3707}
3708
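/*
 * Example of the format parsed above (editor's note; the devices are
 * made up): the boot parameter
 *
 *	pci=resource_alignment=20@0000:00:1c.0;01:00.0
 *
 * requests 2^20-byte (1 MiB) alignment for device 0000:00:1c.0 and the
 * default PAGE_SIZE alignment for 01:00.0.  Entries are separated by
 * ';' or ','.
 */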
3709/**
 3710 * pci_is_reassigndev - check if the specified PCI device is a reassignment target
 3711 * @dev: the PCI device to check
 3712 *
 3713 * RETURNS: non-zero if the PCI device is a target device to reassign,
 3714 * zero if it is not.
3715 */
3716int pci_is_reassigndev(struct pci_dev *dev)
3717{
3718 return (pci_specified_resource_alignment(dev) != 0);
3719}
3720
2069ecfb
YL
3721/*
3722 * This function disables memory decoding and releases memory resources
 3723 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
 3724 * It also rounds the resource size up to the specified alignment.
 3725 * Later on, the kernel will reassign the page-aligned memory resources
 3726 * back to the device.
3727 */
3728void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3729{
3730 int i;
3731 struct resource *r;
3732 resource_size_t align, size;
3733 u16 command;
3734
3735 if (!pci_is_reassigndev(dev))
3736 return;
3737
3738 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3739 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3740 dev_warn(&dev->dev,
3741 "Can't reassign resources to host bridge.\n");
3742 return;
3743 }
3744
3745 dev_info(&dev->dev,
3746 "Disabling memory decoding and releasing memory resources.\n");
3747 pci_read_config_word(dev, PCI_COMMAND, &command);
3748 command &= ~PCI_COMMAND_MEMORY;
3749 pci_write_config_word(dev, PCI_COMMAND, command);
3750
3751 align = pci_specified_resource_alignment(dev);
3752 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3753 r = &dev->resource[i];
3754 if (!(r->flags & IORESOURCE_MEM))
3755 continue;
3756 size = resource_size(r);
3757 if (size < align) {
3758 size = align;
3759 dev_info(&dev->dev,
3760 "Rounding up size of resource #%d to %#llx.\n",
3761 i, (unsigned long long)size);
3762 }
3763 r->end = size - 1;
3764 r->start = 0;
3765 }
 3766	/* Need to disable the bridge's resource windows,
 3767	 * so that the kernel can reassign new resource
 3768	 * windows later on.
3769 */
3770 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3771 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3772 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3773 r = &dev->resource[i];
3774 if (!(r->flags & IORESOURCE_MEM))
3775 continue;
3776 r->end = resource_size(r) - 1;
3777 r->start = 0;
3778 }
3779 pci_disable_bridge_window(dev);
3780 }
3781}
3782
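/*
 * Worked example for the function above (editor's note, hypothetical
 * device): with "pci=resource_alignment=20@0000:01:00.0", a 4 KiB memory
 * BAR of that device is grown to 1 MiB (0x100000) and its start reset to
 * 0; since BARs are aligned on their size, the later reassignment then
 * places it on a 1 MiB boundary.
 */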
32a9a682
YS
3783ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3784{
3785 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3786 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3787 spin_lock(&resource_alignment_lock);
3788 strncpy(resource_alignment_param, buf, count);
3789 resource_alignment_param[count] = '\0';
3790 spin_unlock(&resource_alignment_lock);
3791 return count;
3792}
3793
3794ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3795{
3796 size_t count;
3797 spin_lock(&resource_alignment_lock);
3798 count = snprintf(buf, size, "%s", resource_alignment_param);
3799 spin_unlock(&resource_alignment_lock);
3800 return count;
3801}
3802
3803static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3804{
3805 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3806}
3807
3808static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3809 const char *buf, size_t count)
3810{
3811 return pci_set_resource_alignment_param(buf, count);
3812}
3813
3814BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3815 pci_resource_alignment_store);
3816
3817static int __init pci_resource_alignment_sysfs_init(void)
3818{
3819 return bus_create_file(&pci_bus_type,
3820 &bus_attr_resource_alignment);
3821}
3822
3823late_initcall(pci_resource_alignment_sysfs_init);
3824
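/*
 * The alignment string can also be read or updated at run time through
 * the bus attribute created above, e.g. (hypothetical device address):
 *
 *	echo "20@0000:00:1c.0" > /sys/bus/pci/resource_alignment
 */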
32a2eea7
JG
3825static void __devinit pci_no_domains(void)
3826{
3827#ifdef CONFIG_PCI_DOMAINS
3828 pci_domains_supported = 0;
3829#endif
3830}
3831
0ef5f8f6
AP
3832/**
 3833 * pci_ext_cfg_avail - can we access extended PCI config space?
3834 * @dev: The PCI device of the root bridge.
3835 *
3836 * Returns 1 if we can access PCI extended config space (offsets
3837 * greater than 0xff). This is the default implementation. Architecture
3838 * implementations can override this.
3839 */
3840int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3841{
3842 return 1;
3843}
3844
2d1c8618
BH
3845void __weak pci_fixup_cardbus(struct pci_bus *bus)
3846{
3847}
3848EXPORT_SYMBOL(pci_fixup_cardbus);
3849
ad04d31e 3850static int __init pci_setup(char *str)
1da177e4
LT
3851{
3852 while (str) {
3853 char *k = strchr(str, ',');
3854 if (k)
3855 *k++ = 0;
3856 if (*str && (str = pcibios_setup(str)) && *str) {
309e57df
MW
3857 if (!strcmp(str, "nomsi")) {
3858 pci_no_msi();
7f785763
RD
3859 } else if (!strcmp(str, "noaer")) {
3860 pci_no_aer();
b55438fd
YL
3861 } else if (!strncmp(str, "realloc=", 8)) {
3862 pci_realloc_get_opt(str + 8);
f483d392 3863 } else if (!strncmp(str, "realloc", 7)) {
b55438fd 3864 pci_realloc_get_opt("on");
32a2eea7
JG
3865 } else if (!strcmp(str, "nodomains")) {
3866 pci_no_domains();
6748dcc2
RW
3867 } else if (!strncmp(str, "noari", 5)) {
3868 pcie_ari_disabled = true;
4516a618
AN
3869 } else if (!strncmp(str, "cbiosize=", 9)) {
3870 pci_cardbus_io_size = memparse(str + 9, &str);
3871 } else if (!strncmp(str, "cbmemsize=", 10)) {
3872 pci_cardbus_mem_size = memparse(str + 10, &str);
32a9a682
YS
3873 } else if (!strncmp(str, "resource_alignment=", 19)) {
3874 pci_set_resource_alignment_param(str + 19,
3875 strlen(str + 19));
43c16408
AP
3876 } else if (!strncmp(str, "ecrc=", 5)) {
3877 pcie_ecrc_get_policy(str + 5);
28760489
EB
3878 } else if (!strncmp(str, "hpiosize=", 9)) {
3879 pci_hotplug_io_size = memparse(str + 9, &str);
3880 } else if (!strncmp(str, "hpmemsize=", 10)) {
3881 pci_hotplug_mem_size = memparse(str + 10, &str);
5f39e670
JM
3882 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3883 pcie_bus_config = PCIE_BUS_TUNE_OFF;
b03e7495
JM
3884 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3885 pcie_bus_config = PCIE_BUS_SAFE;
3886 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3887 pcie_bus_config = PCIE_BUS_PERFORMANCE;
5f39e670
JM
3888 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3889 pcie_bus_config = PCIE_BUS_PEER2PEER;
309e57df
MW
3890 } else {
3891 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3892 str);
3893 }
1da177e4
LT
3894 }
3895 str = k;
3896 }
0637a70a 3897 return 0;
1da177e4 3898}
0637a70a 3899early_param("pci", pci_setup);
1da177e4 3900
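/*
 * Example command line handled by pci_setup() above (editor's note, the
 * values are arbitrary):
 *
 *	pci=nomsi,cbmemsize=32M,hpiosize=4K,pcie_bus_safe
 *
 * Options are comma separated; anything recognized neither here nor by
 * pcibios_setup() is reported as "PCI: Unknown option".
 */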
0b62e13b 3901EXPORT_SYMBOL(pci_reenable_device);
b718989d
BH
3902EXPORT_SYMBOL(pci_enable_device_io);
3903EXPORT_SYMBOL(pci_enable_device_mem);
1da177e4 3904EXPORT_SYMBOL(pci_enable_device);
9ac7849e
TH
3905EXPORT_SYMBOL(pcim_enable_device);
3906EXPORT_SYMBOL(pcim_pin_device);
1da177e4 3907EXPORT_SYMBOL(pci_disable_device);
1da177e4
LT
3908EXPORT_SYMBOL(pci_find_capability);
3909EXPORT_SYMBOL(pci_bus_find_capability);
3910EXPORT_SYMBOL(pci_release_regions);
3911EXPORT_SYMBOL(pci_request_regions);
e8de1481 3912EXPORT_SYMBOL(pci_request_regions_exclusive);
1da177e4
LT
3913EXPORT_SYMBOL(pci_release_region);
3914EXPORT_SYMBOL(pci_request_region);
e8de1481 3915EXPORT_SYMBOL(pci_request_region_exclusive);
c87deff7
HS
3916EXPORT_SYMBOL(pci_release_selected_regions);
3917EXPORT_SYMBOL(pci_request_selected_regions);
e8de1481 3918EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
1da177e4 3919EXPORT_SYMBOL(pci_set_master);
6a479079 3920EXPORT_SYMBOL(pci_clear_master);
1da177e4 3921EXPORT_SYMBOL(pci_set_mwi);
694625c0 3922EXPORT_SYMBOL(pci_try_set_mwi);
1da177e4 3923EXPORT_SYMBOL(pci_clear_mwi);
a04ce0ff 3924EXPORT_SYMBOL_GPL(pci_intx);
1da177e4
LT
3925EXPORT_SYMBOL(pci_assign_resource);
3926EXPORT_SYMBOL(pci_find_parent_resource);
c87deff7 3927EXPORT_SYMBOL(pci_select_bars);
1da177e4
LT
3928
3929EXPORT_SYMBOL(pci_set_power_state);
3930EXPORT_SYMBOL(pci_save_state);
3931EXPORT_SYMBOL(pci_restore_state);
e5899e1b 3932EXPORT_SYMBOL(pci_pme_capable);
5a6c9b60 3933EXPORT_SYMBOL(pci_pme_active);
0235c4fc 3934EXPORT_SYMBOL(pci_wake_from_d3);
e5899e1b 3935EXPORT_SYMBOL(pci_target_state);
404cc2d8
RW
3936EXPORT_SYMBOL(pci_prepare_to_sleep);
3937EXPORT_SYMBOL(pci_back_from_sleep);
f7bdd12d 3938EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);