drivers/pci/pci.c
1/*
2 * PCI Bus Services, see include/linux/pci.h for further explanation.
3 *
4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5 * David Mosberger-Tang
6 *
7 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8 */
9
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/pci.h>
14#include <linux/pm.h>
15#include <linux/slab.h>
16#include <linux/module.h>
17#include <linux/spinlock.h>
18#include <linux/string.h>
19#include <linux/log2.h>
20#include <linux/pci-aspm.h>
21#include <linux/pm_wakeup.h>
22#include <linux/interrupt.h>
23#include <linux/device.h>
24#include <linux/pm_runtime.h>
25#include <asm/setup.h>
26#include "pci.h"
27
28const char *pci_power_names[] = {
29 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
30};
31EXPORT_SYMBOL_GPL(pci_power_names);
32
33int isa_dma_bridge_buggy;
34EXPORT_SYMBOL(isa_dma_bridge_buggy);
35
36int pci_pci_problems;
37EXPORT_SYMBOL(pci_pci_problems);
38
39unsigned int pci_pm_d3_delay;
40
41static void pci_pme_list_scan(struct work_struct *work);
42
43static LIST_HEAD(pci_pme_list);
44static DEFINE_MUTEX(pci_pme_list_mutex);
45static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
46
47struct pci_pme_device {
48 struct list_head list;
49 struct pci_dev *dev;
50};
51
52#define PME_TIMEOUT 1000 /* How long between PME checks */
53
54static void pci_dev_d3_sleep(struct pci_dev *dev)
55{
56 unsigned int delay = dev->d3_delay;
57
58 if (delay < pci_pm_d3_delay)
59 delay = pci_pm_d3_delay;
60
61 msleep(delay);
62}
63
64#ifdef CONFIG_PCI_DOMAINS
65int pci_domains_supported = 1;
66#endif
67
68#define DEFAULT_CARDBUS_IO_SIZE (256)
69#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
70/* pci=cbmemsize=nnM,cbiosize=nn can override this */
71unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
72unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
73
74#define DEFAULT_HOTPLUG_IO_SIZE (256)
75#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
76/* pci=hpmemsize=nnM,hpiosize=nn can override this */
77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
79
80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
81
82/*
83 * The default CLS is used if arch didn't set CLS explicitly and not
84 * all pci devices agree on the same value. Arch can override either
85 * the dfl or actual value as it sees fit. Don't forget this is
86 * measured in 32-bit words, not bytes.
87 */
88u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
89u8 pci_cache_line_size;
90
91/*
92 * If we set up a device for bus mastering, we need to check the latency
93 * timer as certain BIOSes forget to set it properly.
94 */
95unsigned int pcibios_max_latency = 255;
96
97/**
98 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
99 * @bus: pointer to PCI bus structure to search
100 *
101 * Given a PCI bus, returns the highest PCI bus number present in the set
102 * including the given PCI bus and its list of child PCI buses.
103 */
104unsigned char pci_bus_max_busnr(struct pci_bus* bus)
105{
106 struct list_head *tmp;
107 unsigned char max, n;
108
109 max = bus->subordinate;
110 list_for_each(tmp, &bus->children) {
111 n = pci_bus_max_busnr(pci_bus_b(tmp));
112 if(n > max)
113 max = n;
114 }
115 return max;
116}
117EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
118
119#ifdef CONFIG_HAS_IOMEM
120void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
121{
122 /*
123 * Make sure the BAR is actually a memory resource, not an IO resource
124 */
125 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
126 WARN_ON(1);
127 return NULL;
128 }
129 return ioremap_nocache(pci_resource_start(pdev, bar),
130 pci_resource_len(pdev, bar));
131}
132EXPORT_SYMBOL_GPL(pci_ioremap_bar);
133#endif
134
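/*
 * Illustrative sketch (not part of the original pci.c): one way a driver
 * might map a memory BAR with pci_ioremap_bar() from its probe() path.
 * The BAR index (0) and the example_* name are assumptions made for the
 * example only.
 */
#if 0
static void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs = pci_ioremap_bar(pdev, 0);

	if (!regs)
		return NULL;	/* BAR 0 missing or not a memory resource */

	/* ... use readl()/writel() on regs; iounmap(regs) on teardown ... */
	return regs;
}
#endif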
135#if 0
136/**
137 * pci_max_busnr - returns maximum PCI bus number
138 *
139 * Returns the highest PCI bus number present in the system global list of
140 * PCI buses.
141 */
142unsigned char __devinit
143pci_max_busnr(void)
144{
145 struct pci_bus *bus = NULL;
146 unsigned char max, n;
147
148 max = 0;
149 while ((bus = pci_find_next_bus(bus)) != NULL) {
150 n = pci_bus_max_busnr(bus);
151 if(n > max)
152 max = n;
153 }
154 return max;
155}
156
157#endif /* 0 */
158
159#define PCI_FIND_CAP_TTL 48
160
161static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
162 u8 pos, int cap, int *ttl)
163{
164 u8 id;
165
166 while ((*ttl)--) {
167 pci_bus_read_config_byte(bus, devfn, pos, &pos);
168 if (pos < 0x40)
169 break;
170 pos &= ~3;
171 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
172 &id);
173 if (id == 0xff)
174 break;
175 if (id == cap)
176 return pos;
177 pos += PCI_CAP_LIST_NEXT;
178 }
179 return 0;
180}
181
182static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
183 u8 pos, int cap)
184{
185 int ttl = PCI_FIND_CAP_TTL;
186
187 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
188}
189
190int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
191{
192 return __pci_find_next_cap(dev->bus, dev->devfn,
193 pos + PCI_CAP_LIST_NEXT, cap);
194}
195EXPORT_SYMBOL_GPL(pci_find_next_capability);
196
197static int __pci_bus_find_cap_start(struct pci_bus *bus,
198 unsigned int devfn, u8 hdr_type)
199{
200 u16 status;
201
202 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
203 if (!(status & PCI_STATUS_CAP_LIST))
204 return 0;
205
206 switch (hdr_type) {
207 case PCI_HEADER_TYPE_NORMAL:
208 case PCI_HEADER_TYPE_BRIDGE:
209 return PCI_CAPABILITY_LIST;
210 case PCI_HEADER_TYPE_CARDBUS:
211 return PCI_CB_CAPABILITY_LIST;
212 default:
213 return 0;
214 }
215
216 return 0;
217}
218
219/**
220 * pci_find_capability - query for devices' capabilities
221 * @dev: PCI device to query
222 * @cap: capability code
223 *
224 * Tell if a device supports a given PCI capability.
225 * Returns the address of the requested capability structure within the
226 * device's PCI configuration space or 0 in case the device does not
227 * support it. Possible values for @cap:
228 *
229 * %PCI_CAP_ID_PM Power Management
230 * %PCI_CAP_ID_AGP Accelerated Graphics Port
231 * %PCI_CAP_ID_VPD Vital Product Data
232 * %PCI_CAP_ID_SLOTID Slot Identification
233 * %PCI_CAP_ID_MSI Message Signalled Interrupts
234 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
235 * %PCI_CAP_ID_PCIX PCI-X
236 * %PCI_CAP_ID_EXP PCI Express
237 */
238int pci_find_capability(struct pci_dev *dev, int cap)
239{
240 int pos;
241
242 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
243 if (pos)
244 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
245
246 return pos;
247}
248
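/*
 * Illustrative sketch (not part of the original pci.c): querying a
 * capability offset, here the MSI capability.  The example_* helper is
 * hypothetical and only reports whether the capability is present.
 */
#if 0
static void example_report_msi_cap(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);

	if (pos)
		dev_info(&pdev->dev, "MSI capability at config offset %#x\n", pos);
	else
		dev_info(&pdev->dev, "no MSI capability advertised\n");
}
#endif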
249/**
250 * pci_bus_find_capability - query for devices' capabilities
251 * @bus: the PCI bus to query
252 * @devfn: PCI device to query
253 * @cap: capability code
254 *
255 * Like pci_find_capability() but works for pci devices that do not have a
256 * pci_dev structure set up yet.
257 *
258 * Returns the address of the requested capability structure within the
259 * device's PCI configuration space or 0 in case the device does not
260 * support it.
261 */
262int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
263{
264 int pos;
265 u8 hdr_type;
266
267 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
268
269 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
270 if (pos)
271 pos = __pci_find_next_cap(bus, devfn, pos, cap);
272
273 return pos;
274}
275
276/**
277 * pci_find_ext_capability - Find an extended capability
278 * @dev: PCI device to query
279 * @cap: capability code
280 *
281 * Returns the address of the requested extended capability structure
282 * within the device's PCI configuration space or 0 if the device does
283 * not support it. Possible values for @cap:
284 *
285 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
286 * %PCI_EXT_CAP_ID_VC Virtual Channel
287 * %PCI_EXT_CAP_ID_DSN Device Serial Number
288 * %PCI_EXT_CAP_ID_PWR Power Budgeting
289 */
290int pci_find_ext_capability(struct pci_dev *dev, int cap)
291{
292 u32 header;
293 int ttl;
294 int pos = PCI_CFG_SPACE_SIZE;
295
296 /* minimum 8 bytes per capability */
297 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
298
299 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
300 return 0;
301
302 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
303 return 0;
304
305 /*
306 * If we have no capabilities, this is indicated by cap ID,
307 * cap version and next pointer all being 0.
308 */
309 if (header == 0)
310 return 0;
311
312 while (ttl-- > 0) {
313 if (PCI_EXT_CAP_ID(header) == cap)
314 return pos;
315
316 pos = PCI_EXT_CAP_NEXT(header);
317 if (pos < PCI_CFG_SPACE_SIZE)
318 break;
319
320 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
321 break;
322 }
323
324 return 0;
325}
326EXPORT_SYMBOL_GPL(pci_find_ext_capability);
327
328/**
329 * pci_bus_find_ext_capability - find an extended capability
330 * @bus: the PCI bus to query
331 * @devfn: PCI device to query
332 * @cap: capability code
333 *
334 * Like pci_find_ext_capability() but works for pci devices that do not have a
335 * pci_dev structure set up yet.
336 *
337 * Returns the address of the requested capability structure within the
338 * device's PCI configuration space or 0 in case the device does not
339 * support it.
340 */
341int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
342 int cap)
343{
344 u32 header;
345 int ttl;
346 int pos = PCI_CFG_SPACE_SIZE;
347
348 /* minimum 8 bytes per capability */
349 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
350
351 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
352 return 0;
353 if (header == 0xffffffff || header == 0)
354 return 0;
355
356 while (ttl-- > 0) {
357 if (PCI_EXT_CAP_ID(header) == cap)
358 return pos;
359
360 pos = PCI_EXT_CAP_NEXT(header);
361 if (pos < PCI_CFG_SPACE_SIZE)
362 break;
363
364 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
365 break;
366 }
367
368 return 0;
369}
370
371static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
372{
373 int rc, ttl = PCI_FIND_CAP_TTL;
374 u8 cap, mask;
375
376 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
377 mask = HT_3BIT_CAP_MASK;
378 else
379 mask = HT_5BIT_CAP_MASK;
380
381 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
382 PCI_CAP_ID_HT, &ttl);
383 while (pos) {
384 rc = pci_read_config_byte(dev, pos + 3, &cap);
385 if (rc != PCIBIOS_SUCCESSFUL)
386 return 0;
387
388 if ((cap & mask) == ht_cap)
389 return pos;
390
391 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
392 pos + PCI_CAP_LIST_NEXT,
393 PCI_CAP_ID_HT, &ttl);
394 }
395
396 return 0;
397}
398/**
399 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
400 * @dev: PCI device to query
401 * @pos: Position from which to continue searching
402 * @ht_cap: Hypertransport capability code
403 *
404 * To be used in conjunction with pci_find_ht_capability() to search for
405 * all capabilities matching @ht_cap. @pos should always be a value returned
406 * from pci_find_ht_capability().
407 *
408 * NB. To be 100% safe against broken PCI devices, the caller should take
409 * steps to avoid an infinite loop.
410 */
411int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
412{
413 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
414}
415EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
416
417/**
418 * pci_find_ht_capability - query a device's Hypertransport capabilities
419 * @dev: PCI device to query
420 * @ht_cap: Hypertransport capability code
421 *
422 * Tell if a device supports a given Hypertransport capability.
423 * Returns an address within the device's PCI configuration space
424 * or 0 in case the device does not support the requested capability.
425 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
426 * which has a Hypertransport capability matching @ht_cap.
427 */
428int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
429{
430 int pos;
431
432 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
433 if (pos)
434 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
435
436 return pos;
437}
438EXPORT_SYMBOL_GPL(pci_find_ht_capability);
439
440/**
441 * pci_find_parent_resource - return resource region of parent bus of given region
442 * @dev: PCI device structure contains resources to be searched
443 * @res: child resource record for which parent is sought
444 *
445 * For given resource region of given device, return the resource
446 * region of parent bus the given region is contained in or where
447 * it should be allocated from.
448 */
449struct resource *
450pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
451{
452 const struct pci_bus *bus = dev->bus;
453 int i;
454 struct resource *best = NULL, *r;
455
456 pci_bus_for_each_resource(bus, r, i) {
457 if (!r)
458 continue;
459 if (res->start && !(res->start >= r->start && res->end <= r->end))
460 continue; /* Not contained */
461 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
462 continue; /* Wrong type */
463 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
464 return r; /* Exact match */
465 /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
466 if (r->flags & IORESOURCE_PREFETCH)
467 continue;
468 /* .. but we can put a prefetchable resource inside a non-prefetchable one */
469 if (!best)
470 best = r;
471 }
472 return best;
473}
474
475/**
476 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
477 * @dev: PCI device to have its BARs restored
478 *
479 * Restore the BAR values for a given device, so as to make it
480 * accessible by its driver.
481 */
482static void
483pci_restore_bars(struct pci_dev *dev)
484{
485 int i;
486
487 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
488 pci_update_resource(dev, i);
489}
490
491static struct pci_platform_pm_ops *pci_platform_pm;
492
493int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
494{
495 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
496 || !ops->sleep_wake || !ops->can_wakeup)
497 return -EINVAL;
498 pci_platform_pm = ops;
499 return 0;
500}
501
502static inline bool platform_pci_power_manageable(struct pci_dev *dev)
503{
504 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
505}
506
507static inline int platform_pci_set_power_state(struct pci_dev *dev,
508 pci_power_t t)
509{
510 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
511}
512
513static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
514{
515 return pci_platform_pm ?
516 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
517}
518
519static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
520{
521 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
522}
523
524static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
525{
526 return pci_platform_pm ?
527 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
528}
529
530static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
531{
532 return pci_platform_pm ?
533 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
534}
535
536/**
537 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
538 * given PCI device
539 * @dev: PCI device to handle.
540 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
541 *
542 * RETURN VALUE:
543 * -EINVAL if the requested state is invalid.
544 * -EIO if device does not support PCI PM or its PM capabilities register has a
545 * wrong version, or device doesn't support the requested state.
546 * 0 if device already is in the requested state.
547 * 0 if device's power state has been successfully changed.
548 */
549static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
550{
551 u16 pmcsr;
552 bool need_restore = false;
553
554 /* Check if we're already there */
555 if (dev->current_state == state)
556 return 0;
557
558 if (!dev->pm_cap)
559 return -EIO;
560
561 if (state < PCI_D0 || state > PCI_D3hot)
562 return -EINVAL;
563
564 /* Validate current state:
565 * Can enter D0 from any state, but we can only go deeper
566 * into sleep if we're already in a low power state
567 */
568 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
569 && dev->current_state > state) {
570 dev_err(&dev->dev, "invalid power transition "
571 "(from state %d to %d)\n", dev->current_state, state);
572 return -EINVAL;
573 }
574
575 /* check if this device supports the desired state */
576 if ((state == PCI_D1 && !dev->d1_support)
577 || (state == PCI_D2 && !dev->d2_support))
578 return -EIO;
579
580 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
581
582 /* If we're (effectively) in D3, force entire word to 0.
583 * This doesn't affect PME_Status, disables PME_En, and
584 * sets PowerState to 0.
585 */
586 switch (dev->current_state) {
587 case PCI_D0:
588 case PCI_D1:
589 case PCI_D2:
590 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
591 pmcsr |= state;
592 break;
593 case PCI_D3hot:
594 case PCI_D3cold:
595 case PCI_UNKNOWN: /* Boot-up */
596 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
597 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
598 need_restore = true;
599 /* Fall-through: force to D0 */
600 default:
601 pmcsr = 0;
602 break;
603 }
604
605 /* enter specified state */
606 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
607
608 /* Mandatory power management transition delays */
609 /* see PCI PM 1.1 5.6.1 table 18 */
610 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
611 pci_dev_d3_sleep(dev);
612 else if (state == PCI_D2 || dev->current_state == PCI_D2)
613 udelay(PCI_PM_D2_DELAY);
614
615 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
616 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
617 if (dev->current_state != state && printk_ratelimit())
618 dev_info(&dev->dev, "Refused to change power state, "
619 "currently in D%d\n", dev->current_state);
620
621 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
622 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
623 * from D3hot to D0 _may_ perform an internal reset, thereby
624 * going to "D0 Uninitialized" rather than "D0 Initialized".
625 * For example, at least some versions of the 3c905B and the
626 * 3c556B exhibit this behaviour.
627 *
628 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
629 * devices in a D3hot state at boot. Consequently, we need to
630 * restore at least the BARs so that the device will be
631 * accessible to its driver.
632 */
633 if (need_restore)
634 pci_restore_bars(dev);
635
636 if (dev->bus->self)
637 pcie_aspm_pm_state_change(dev->bus->self);
638
639 return 0;
640}
641
642/**
643 * pci_update_current_state - Read PCI power state of given device from its
644 * PCI PM registers and cache it
645 * @dev: PCI device to handle.
646 * @state: State to cache in case the device doesn't have the PM capability
647 */
648void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
649{
650 if (dev->pm_cap) {
651 u16 pmcsr;
652
653 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
654 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
655 } else {
656 dev->current_state = state;
657 }
658}
659
660/**
661 * pci_platform_power_transition - Use platform to change device power state
662 * @dev: PCI device to handle.
663 * @state: State to put the device into.
664 */
665static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
666{
667 int error;
668
669 if (platform_pci_power_manageable(dev)) {
670 error = platform_pci_set_power_state(dev, state);
671 if (!error)
672 pci_update_current_state(dev, state);
673 /* Fall back to PCI_D0 if native PM is not supported */
674 if (!dev->pm_cap)
675 dev->current_state = PCI_D0;
676 } else {
677 error = -ENODEV;
678 /* Fall back to PCI_D0 if native PM is not supported */
679 if (!dev->pm_cap)
680 dev->current_state = PCI_D0;
681 }
682
683 return error;
684}
685
686/**
687 * __pci_start_power_transition - Start power transition of a PCI device
688 * @dev: PCI device to handle.
689 * @state: State to put the device into.
690 */
691static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
692{
693 if (state == PCI_D0)
694 pci_platform_power_transition(dev, PCI_D0);
695}
696
697/**
698 * __pci_complete_power_transition - Complete power transition of a PCI device
699 * @dev: PCI device to handle.
700 * @state: State to put the device into.
701 *
702 * This function should not be called directly by device drivers.
703 */
704int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
705{
706 return state >= PCI_D0 ?
707 pci_platform_power_transition(dev, state) : -EINVAL;
708}
709EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
710
711/**
712 * pci_set_power_state - Set the power state of a PCI device
713 * @dev: PCI device to handle.
714 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
715 *
716 * Transition a device to a new power state, using the platform firmware and/or
717 * the device's PCI PM registers.
718 *
719 * RETURN VALUE:
720 * -EINVAL if the requested state is invalid.
721 * -EIO if device does not support PCI PM or its PM capabilities register has a
722 * wrong version, or device doesn't support the requested state.
723 * 0 if device already is in the requested state.
724 * 0 if device's power state has been successfully changed.
725 */
726int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
727{
728 int error;
729
730 /* bound the state we're entering */
731 if (state > PCI_D3hot)
732 state = PCI_D3hot;
733 else if (state < PCI_D0)
734 state = PCI_D0;
735 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
736 /*
737 * If the device or the parent bridge do not support PCI PM,
738 * ignore the request if we're doing anything other than putting
739 * it into D0 (which would only happen on boot).
740 */
741 return 0;
742
743 __pci_start_power_transition(dev, state);
744
745 /* This device is quirked not to be put into D3, so
746 don't put it in D3 */
747 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
748 return 0;
749
750 error = pci_raw_set_power_state(dev, state);
751
752 if (!__pci_complete_power_transition(dev, state))
753 error = 0;
754 /*
755 * When aspm_policy is "powersave" this call ensures
756 * that ASPM is configured.
757 */
758 if (!error && dev->bus->self)
759 pcie_aspm_powersave_config_link(dev->bus->self);
760
761 return error;
762}
763
764/**
765 * pci_choose_state - Choose the power state of a PCI device
766 * @dev: PCI device to be suspended
767 * @state: target sleep state for the whole system. This is the value
768 * that is passed to suspend() function.
769 *
770 * Returns PCI power state suitable for given device and given system
771 * message.
772 */
773
774pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
775{
776 pci_power_t ret;
777
778 if (!pci_find_capability(dev, PCI_CAP_ID_PM))
779 return PCI_D0;
780
781 ret = platform_pci_choose_state(dev);
782 if (ret != PCI_POWER_ERROR)
783 return ret;
784
785 switch (state.event) {
786 case PM_EVENT_ON:
787 return PCI_D0;
788 case PM_EVENT_FREEZE:
789 case PM_EVENT_PRETHAW:
790 /* REVISIT both freeze and pre-thaw "should" use D0 */
791 case PM_EVENT_SUSPEND:
792 case PM_EVENT_HIBERNATE:
793 return PCI_D3hot;
794 default:
795 dev_info(&dev->dev, "unrecognized suspend event %d\n",
796 state.event);
797 BUG();
798 }
799 return PCI_D0;
800}
801
802EXPORT_SYMBOL(pci_choose_state);
803
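/*
 * Illustrative sketch (not part of the original pci.c): how a legacy
 * (non-dev_pm_ops) driver suspend hook might combine pci_save_state(),
 * pci_choose_state() and pci_set_power_state().  The example_* name is
 * hypothetical.
 */
#if 0
static int example_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}
#endif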
804#define PCI_EXP_SAVE_REGS 7
805
806#define pcie_cap_has_devctl(type, flags) 1
807#define pcie_cap_has_lnkctl(type, flags) \
808 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
809 (type == PCI_EXP_TYPE_ROOT_PORT || \
810 type == PCI_EXP_TYPE_ENDPOINT || \
811 type == PCI_EXP_TYPE_LEG_END))
812#define pcie_cap_has_sltctl(type, flags) \
813 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
814 ((type == PCI_EXP_TYPE_ROOT_PORT) || \
815 (type == PCI_EXP_TYPE_DOWNSTREAM && \
816 (flags & PCI_EXP_FLAGS_SLOT))))
817#define pcie_cap_has_rtctl(type, flags) \
818 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
819 (type == PCI_EXP_TYPE_ROOT_PORT || \
820 type == PCI_EXP_TYPE_RC_EC))
821#define pcie_cap_has_devctl2(type, flags) \
822 ((flags & PCI_EXP_FLAGS_VERS) > 1)
823#define pcie_cap_has_lnkctl2(type, flags) \
824 ((flags & PCI_EXP_FLAGS_VERS) > 1)
825#define pcie_cap_has_sltctl2(type, flags) \
826 ((flags & PCI_EXP_FLAGS_VERS) > 1)
827
828static int pci_save_pcie_state(struct pci_dev *dev)
829{
830 int pos, i = 0;
831 struct pci_cap_saved_state *save_state;
832 u16 *cap;
833 u16 flags;
834
835 pos = pci_pcie_cap(dev);
836 if (!pos)
837 return 0;
838
839 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
840 if (!save_state) {
841 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
842 return -ENOMEM;
843 }
844 cap = (u16 *)&save_state->cap.data[0];
845
846 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
847
848 if (pcie_cap_has_devctl(dev->pcie_type, flags))
849 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
850 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
851 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
852 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
853 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
854 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
855 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
856 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
857 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
858 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
859 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
860 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
861 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
862
863 return 0;
864}
865
866static void pci_restore_pcie_state(struct pci_dev *dev)
867{
868 int i = 0, pos;
869 struct pci_cap_saved_state *save_state;
870 u16 *cap;
871 u16 flags;
872
873 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
874 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
875 if (!save_state || pos <= 0)
876 return;
877 cap = (u16 *)&save_state->cap.data[0];
878
879 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
880
881 if (pcie_cap_has_devctl(dev->pcie_type, flags))
882 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
883 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
884 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
885 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
886 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
887 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
888 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
889 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
890 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
891 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
892 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
893 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
894 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
895}
896
897
898static int pci_save_pcix_state(struct pci_dev *dev)
899{
900 int pos;
901 struct pci_cap_saved_state *save_state;
902
903 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
904 if (pos <= 0)
905 return 0;
906
907 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
908 if (!save_state) {
909 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
910 return -ENOMEM;
911 }
912
913 pci_read_config_word(dev, pos + PCI_X_CMD,
914 (u16 *)save_state->cap.data);
915
916 return 0;
917}
918
919static void pci_restore_pcix_state(struct pci_dev *dev)
920{
921 int i = 0, pos;
922 struct pci_cap_saved_state *save_state;
923 u16 *cap;
924
925 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
926 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
927 if (!save_state || pos <= 0)
928 return;
929 cap = (u16 *)&save_state->cap.data[0];
930
931 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
932}
933
934
935/**
936 * pci_save_state - save the PCI configuration space of a device before suspending
937 * @dev: - PCI device that we're dealing with
938 */
939int
940pci_save_state(struct pci_dev *dev)
941{
942 int i;
943 /* XXX: 100% dword access ok here? */
944 for (i = 0; i < 16; i++)
945 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
946 dev->state_saved = true;
947 if ((i = pci_save_pcie_state(dev)) != 0)
948 return i;
949 if ((i = pci_save_pcix_state(dev)) != 0)
950 return i;
951 return 0;
952}
953
954/**
955 * pci_restore_state - Restore the saved state of a PCI device
956 * @dev: - PCI device that we're dealing with
957 */
958void pci_restore_state(struct pci_dev *dev)
959{
960 int i;
961 u32 val;
962
963 if (!dev->state_saved)
964 return;
965
966 /* PCI Express register must be restored first */
967 pci_restore_pcie_state(dev);
968
969 /*
970 * The Base Address register should be programmed before the command
971 * register(s)
972 */
973 for (i = 15; i >= 0; i--) {
974 pci_read_config_dword(dev, i * 4, &val);
975 if (val != dev->saved_config_space[i]) {
976 dev_dbg(&dev->dev, "restoring config "
977 "space at offset %#x (was %#x, writing %#x)\n",
978 i, val, (int)dev->saved_config_space[i]);
979 pci_write_config_dword(dev,i * 4,
980 dev->saved_config_space[i]);
981 }
982 }
983 pci_restore_pcix_state(dev);
984 pci_restore_msi_state(dev);
985 pci_restore_iov_state(dev);
986
987 dev->state_saved = false;
988}
989
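/*
 * Illustrative sketch (not part of the original pci.c): the resume-side
 * counterpart of a legacy suspend hook, restoring config space saved with
 * pci_save_state().  The example_* name is hypothetical.
 */
#if 0
static int example_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif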
990struct pci_saved_state {
991 u32 config_space[16];
992 struct pci_cap_saved_data cap[0];
993};
994
995/**
996 * pci_store_saved_state - Allocate and return an opaque struct containing
997 * the device saved state.
998 * @dev: PCI device that we're dealing with
999 *
1000 * Return NULL if no state or error.
1001 */
1002struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1003{
1004 struct pci_saved_state *state;
1005 struct pci_cap_saved_state *tmp;
1006 struct pci_cap_saved_data *cap;
1007 struct hlist_node *pos;
1008 size_t size;
1009
1010 if (!dev->state_saved)
1011 return NULL;
1012
1013 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1014
1015 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
1016 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1017
1018 state = kzalloc(size, GFP_KERNEL);
1019 if (!state)
1020 return NULL;
1021
1022 memcpy(state->config_space, dev->saved_config_space,
1023 sizeof(state->config_space));
1024
1025 cap = state->cap;
1026 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
1027 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1028 memcpy(cap, &tmp->cap, len);
1029 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1030 }
1031 /* Empty cap_save terminates list */
1032
1033 return state;
1034}
1035EXPORT_SYMBOL_GPL(pci_store_saved_state);
1036
1037/**
1038 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1039 * @dev: PCI device that we're dealing with
1040 * @state: Saved state returned from pci_store_saved_state()
1041 */
1042int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1043{
1044 struct pci_cap_saved_data *cap;
1045
1046 dev->state_saved = false;
1047
1048 if (!state)
1049 return 0;
1050
1051 memcpy(dev->saved_config_space, state->config_space,
1052 sizeof(state->config_space));
1053
1054 cap = state->cap;
1055 while (cap->size) {
1056 struct pci_cap_saved_state *tmp;
1057
1058 tmp = pci_find_saved_cap(dev, cap->cap_nr);
1059 if (!tmp || tmp->cap.size != cap->size)
1060 return -EINVAL;
1061
1062 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1063 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1064 sizeof(struct pci_cap_saved_data) + cap->size);
1065 }
1066
1067 dev->state_saved = true;
1068 return 0;
1069}
1070EXPORT_SYMBOL_GPL(pci_load_saved_state);
1071
1072/**
1073 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1074 * and free the memory allocated for it.
1075 * @dev: PCI device that we're dealing with
1076 * @state: Pointer to saved state returned from pci_store_saved_state()
1077 */
1078int pci_load_and_free_saved_state(struct pci_dev *dev,
1079 struct pci_saved_state **state)
1080{
1081 int ret = pci_load_saved_state(dev, *state);
1082 kfree(*state);
1083 *state = NULL;
1084 return ret;
1085}
1086EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1087
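/*
 * Illustrative sketch (not part of the original pci.c): capturing a
 * device's saved state as an opaque blob and loading it back later, e.g.
 * around an operation that disturbs config space.  The example_* name is
 * hypothetical; a caller would typically follow the load with
 * pci_restore_state() to write the state back to the device.
 */
#if 0
static int example_snapshot_and_reload(struct pci_dev *pdev)
{
	struct pci_saved_state *state;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);
	if (!state)
		return -ENOMEM;

	/* ... something that clobbers the device's configuration ... */

	return pci_load_and_free_saved_state(pdev, &state);
}
#endif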
1088static int do_pci_enable_device(struct pci_dev *dev, int bars)
1089{
1090 int err;
1091
1092 err = pci_set_power_state(dev, PCI_D0);
1093 if (err < 0 && err != -EIO)
1094 return err;
1095 err = pcibios_enable_device(dev, bars);
1096 if (err < 0)
1097 return err;
1098 pci_fixup_device(pci_fixup_enable, dev);
1099
1100 return 0;
1101}
1102
1103/**
1104 * pci_reenable_device - Resume abandoned device
1105 * @dev: PCI device to be resumed
1106 *
1107 * Note this function is a backend of pci_default_resume and is not supposed
1108 * to be called by normal code; write a proper resume handler and use it instead.
1109 */
1110int pci_reenable_device(struct pci_dev *dev)
1111{
1112 if (pci_is_enabled(dev))
1113 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1114 return 0;
1115}
1116
1117static int __pci_enable_device_flags(struct pci_dev *dev,
1118 resource_size_t flags)
1119{
1120 int err;
1121 int i, bars = 0;
1122
1123 /*
1124 * Power state could be unknown at this point, either due to a fresh
1125 * boot or a device removal call. So get the current power state
1126 * so that things like MSI message writing will behave as expected
1127 * (e.g. if the device really is in D0 at enable time).
1128 */
1129 if (dev->pm_cap) {
1130 u16 pmcsr;
1131 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1132 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1133 }
1134
1135 if (atomic_add_return(1, &dev->enable_cnt) > 1)
1136 return 0; /* already enabled */
1137
1138 /* only skip sriov related */
1139 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1140 if (dev->resource[i].flags & flags)
1141 bars |= (1 << i);
1142 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1143 if (dev->resource[i].flags & flags)
1144 bars |= (1 << i);
1145
1146 err = do_pci_enable_device(dev, bars);
1147 if (err < 0)
1148 atomic_dec(&dev->enable_cnt);
1149 return err;
1150}
1151
1152/**
1153 * pci_enable_device_io - Initialize a device for use with IO space
1154 * @dev: PCI device to be initialized
1155 *
1156 * Initialize device before it's used by a driver. Ask low-level code
1157 * to enable I/O resources. Wake up the device if it was suspended.
1158 * Beware, this function can fail.
1159 */
1160int pci_enable_device_io(struct pci_dev *dev)
1161{
1162 return __pci_enable_device_flags(dev, IORESOURCE_IO);
1163}
1164
1165/**
1166 * pci_enable_device_mem - Initialize a device for use with Memory space
1167 * @dev: PCI device to be initialized
1168 *
1169 * Initialize device before it's used by a driver. Ask low-level code
1170 * to enable Memory resources. Wake up the device if it was suspended.
1171 * Beware, this function can fail.
1172 */
1173int pci_enable_device_mem(struct pci_dev *dev)
1174{
1175 return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1176}
1177
1178/**
1179 * pci_enable_device - Initialize device before it's used by a driver.
1180 * @dev: PCI device to be initialized
1181 *
1182 * Initialize device before it's used by a driver. Ask low-level code
1183 * to enable I/O and memory. Wake up the device if it was suspended.
1184 * Beware, this function can fail.
1185 *
1186 * Note we don't actually enable the device many times if we call
1187 * this function repeatedly (we just increment the count).
1188 */
1189int pci_enable_device(struct pci_dev *dev)
1190{
1191 return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1192}
1193
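/*
 * Illustrative sketch (not part of the original pci.c): minimal probe()
 * error handling around pci_enable_device().  pci_request_regions() and
 * pci_set_master() are real interfaces, but their use here is an
 * assumption about a typical driver, not something this file mandates.
 */
#if 0
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "example");
	if (rc) {
		pci_disable_device(pdev);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
#endif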
1194/*
1195 * Managed PCI resources. This manages device on/off, intx/msi/msix
1196 * on/off and BAR regions. pci_dev itself records msi/msix status, so
1197 * there's no need to track it separately. pci_devres is initialized
1198 * when a device is enabled using managed PCI device enable interface.
1199 */
1200struct pci_devres {
1201 unsigned int enabled:1;
1202 unsigned int pinned:1;
1203 unsigned int orig_intx:1;
1204 unsigned int restore_intx:1;
1205 u32 region_mask;
1206};
1207
1208static void pcim_release(struct device *gendev, void *res)
1209{
1210 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1211 struct pci_devres *this = res;
1212 int i;
1213
1214 if (dev->msi_enabled)
1215 pci_disable_msi(dev);
1216 if (dev->msix_enabled)
1217 pci_disable_msix(dev);
1218
1219 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1220 if (this->region_mask & (1 << i))
1221 pci_release_region(dev, i);
1222
1223 if (this->restore_intx)
1224 pci_intx(dev, this->orig_intx);
1225
1226 if (this->enabled && !this->pinned)
1227 pci_disable_device(dev);
1228}
1229
1230static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1231{
1232 struct pci_devres *dr, *new_dr;
1233
1234 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1235 if (dr)
1236 return dr;
1237
1238 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1239 if (!new_dr)
1240 return NULL;
1241 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1242}
1243
1244static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1245{
1246 if (pci_is_managed(pdev))
1247 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1248 return NULL;
1249}
1250
1251/**
1252 * pcim_enable_device - Managed pci_enable_device()
1253 * @pdev: PCI device to be initialized
1254 *
1255 * Managed pci_enable_device().
1256 */
1257int pcim_enable_device(struct pci_dev *pdev)
1258{
1259 struct pci_devres *dr;
1260 int rc;
1261
1262 dr = get_pci_dr(pdev);
1263 if (unlikely(!dr))
1264 return -ENOMEM;
1265 if (dr->enabled)
1266 return 0;
1267
1268 rc = pci_enable_device(pdev);
1269 if (!rc) {
1270 pdev->is_managed = 1;
1271 dr->enabled = 1;
1272 }
1273 return rc;
1274}
1275
1276/**
1277 * pcim_pin_device - Pin managed PCI device
1278 * @pdev: PCI device to pin
1279 *
1280 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1281 * driver detach. @pdev must have been enabled with
1282 * pcim_enable_device().
1283 */
1284void pcim_pin_device(struct pci_dev *pdev)
1285{
1286 struct pci_devres *dr;
1287
1288 dr = find_pci_dr(pdev);
1289 WARN_ON(!dr || !dr->enabled);
1290 if (dr)
1291 dr->pinned = 1;
1292}
1293
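/*
 * Illustrative sketch (not part of the original pci.c): the managed
 * (devres) variant of device enable; no explicit pci_disable_device() is
 * needed on the error path or at remove time.  example_* is hypothetical.
 */
#if 0
static int example_managed_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
{
	int rc = pcim_enable_device(pdev);

	if (rc)
		return rc;

	/* resources grabbed via pcim_* helpers are released automatically */
	return 0;
}
#endif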
1294/**
1295 * pcibios_disable_device - disable arch specific PCI resources for device dev
1296 * @dev: the PCI device to disable
1297 *
1298 * Disables architecture specific PCI resources for the device. This
1299 * is the default implementation. Architecture implementations can
1300 * override this.
1301 */
1302void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1303
1304static void do_pci_disable_device(struct pci_dev *dev)
1305{
1306 u16 pci_command;
1307
1308 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1309 if (pci_command & PCI_COMMAND_MASTER) {
1310 pci_command &= ~PCI_COMMAND_MASTER;
1311 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1312 }
1313
1314 pcibios_disable_device(dev);
1315}
1316
1317/**
1318 * pci_disable_enabled_device - Disable device without updating enable_cnt
1319 * @dev: PCI device to disable
1320 *
1321 * NOTE: This function is a backend of PCI power management routines and is
1322 * not supposed to be called by drivers.
1323 */
1324void pci_disable_enabled_device(struct pci_dev *dev)
1325{
1326 if (pci_is_enabled(dev))
1327 do_pci_disable_device(dev);
1328}
1329
1330/**
1331 * pci_disable_device - Disable PCI device after use
1332 * @dev: PCI device to be disabled
1333 *
1334 * Signal to the system that the PCI device is not in use by the system
1335 * anymore. This only involves disabling PCI bus-mastering, if active.
1336 *
1337 * Note we don't actually disable the device until all callers of
1338 * pci_enable_device() have called pci_disable_device().
1339 */
1340void
1341pci_disable_device(struct pci_dev *dev)
1342{
1343 struct pci_devres *dr;
1344
1345 dr = find_pci_dr(dev);
1346 if (dr)
1347 dr->enabled = 0;
1348
1349 if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1350 return;
1351
1352 do_pci_disable_device(dev);
1353
1354 dev->is_busmaster = 0;
1355}
1356
1357/**
1358 * pcibios_set_pcie_reset_state - set reset state for device dev
1359 * @dev: the PCIe device reset
1360 * @state: Reset state to enter into
1361 *
1362 *
1363 * Sets the PCIe reset state for the device. This is the default
1364 * implementation. Architecture implementations can override this.
1365 */
1366int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1367 enum pcie_reset_state state)
1368{
1369 return -EINVAL;
1370}
1371
1372/**
1373 * pci_set_pcie_reset_state - set reset state for device dev
1374 * @dev: the PCIe device reset
1375 * @state: Reset state to enter into
1376 *
1377 *
1378 * Sets the PCI reset state for the device.
1379 */
1380int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1381{
1382 return pcibios_set_pcie_reset_state(dev, state);
1383}
1384
1385/**
1386 * pci_check_pme_status - Check if given device has generated PME.
1387 * @dev: Device to check.
1388 *
1389 * Check the PME status of the device and if set, clear it and clear PME enable
1390 * (if set). Return 'true' if PME status and PME enable were both set or
1391 * 'false' otherwise.
1392 */
1393bool pci_check_pme_status(struct pci_dev *dev)
1394{
1395 int pmcsr_pos;
1396 u16 pmcsr;
1397 bool ret = false;
1398
1399 if (!dev->pm_cap)
1400 return false;
1401
1402 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1403 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1404 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1405 return false;
1406
1407 /* Clear PME status. */
1408 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1409 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1410 /* Disable PME to avoid interrupt flood. */
1411 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1412 ret = true;
1413 }
1414
1415 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1416
1417 return ret;
1418}
1419
1420/**
1421 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1422 * @dev: Device to handle.
1423 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
1424 *
1425 * Check if @dev has generated PME and queue a resume request for it in that
1426 * case.
1427 */
1428static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1429{
1430 if (pme_poll_reset && dev->pme_poll)
1431 dev->pme_poll = false;
1432
1433 if (pci_check_pme_status(dev)) {
1434 pci_wakeup_event(dev);
1435 pm_request_resume(&dev->dev);
1436 }
1437 return 0;
1438}
1439
1440/**
1441 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1442 * @bus: Top bus of the subtree to walk.
1443 */
1444void pci_pme_wakeup_bus(struct pci_bus *bus)
1445{
1446 if (bus)
1447 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1448}
1449
1450/**
1451 * pci_pme_capable - check the capability of PCI device to generate PME#
1452 * @dev: PCI device to handle.
1453 * @state: PCI state from which device will issue PME#.
1454 */
1455bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1456{
1457 if (!dev->pm_cap)
1458 return false;
1459
1460 return !!(dev->pme_support & (1 << state));
1461}
1462
1463static void pci_pme_list_scan(struct work_struct *work)
1464{
1465 struct pci_pme_device *pme_dev, *n;
1466
1467 mutex_lock(&pci_pme_list_mutex);
1468 if (!list_empty(&pci_pme_list)) {
1469 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1470 if (pme_dev->dev->pme_poll) {
1471 pci_pme_wakeup(pme_dev->dev, NULL);
1472 } else {
1473 list_del(&pme_dev->list);
1474 kfree(pme_dev);
1475 }
1476 }
1477 if (!list_empty(&pci_pme_list))
1478 schedule_delayed_work(&pci_pme_work,
1479 msecs_to_jiffies(PME_TIMEOUT));
1480 }
1481 mutex_unlock(&pci_pme_list_mutex);
1482}
1483
1484/**
1485 * pci_pme_active - enable or disable PCI device's PME# function
1486 * @dev: PCI device to handle.
1487 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1488 *
1489 * The caller must verify that the device is capable of generating PME# before
1490 * calling this function with @enable equal to 'true'.
1491 */
1492void pci_pme_active(struct pci_dev *dev, bool enable)
1493{
1494 u16 pmcsr;
1495
1496 if (!dev->pm_cap)
1497 return;
1498
1499 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1500 /* Clear PME_Status by writing 1 to it and enable PME# */
1501 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1502 if (!enable)
1503 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1504
1505 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1506
1507 /* PCI (as opposed to PCIe) PME requires that the device have
1508 its PME# line hooked up correctly. Not all hardware vendors
1509 do this, so the PME never gets delivered and the device
1510 remains asleep. The easiest way around this is to
1511 periodically walk the list of suspended devices and check
1512 whether any have their PME flag set. The assumption is that
1513 we'll wake up often enough anyway that this won't be a huge
1514 hit, and the power savings from the devices will still be a
1515 win. */
1516
1517 if (dev->pme_poll) {
1518 struct pci_pme_device *pme_dev;
1519 if (enable) {
1520 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1521 GFP_KERNEL);
1522 if (!pme_dev)
1523 goto out;
1524 pme_dev->dev = dev;
1525 mutex_lock(&pci_pme_list_mutex);
1526 list_add(&pme_dev->list, &pci_pme_list);
1527 if (list_is_singular(&pci_pme_list))
1528 schedule_delayed_work(&pci_pme_work,
1529 msecs_to_jiffies(PME_TIMEOUT));
1530 mutex_unlock(&pci_pme_list_mutex);
1531 } else {
1532 mutex_lock(&pci_pme_list_mutex);
1533 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1534 if (pme_dev->dev == dev) {
1535 list_del(&pme_dev->list);
1536 kfree(pme_dev);
1537 break;
1538 }
1539 }
1540 mutex_unlock(&pci_pme_list_mutex);
1541 }
1542 }
1543
1544out:
1545 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1546}
1547
1548/**
1549 * __pci_enable_wake - enable PCI device as wakeup event source
1550 * @dev: PCI device affected
1551 * @state: PCI state from which device will issue wakeup events
1552 * @runtime: True if the events are to be generated at run time
1553 * @enable: True to enable event generation; false to disable
1554 *
1555 * This enables the device as a wakeup event source, or disables it.
1556 * When such events involves platform-specific hooks, those hooks are
1557 * called automatically by this routine.
1558 *
1559 * Devices with legacy power management (no standard PCI PM capabilities)
1560 * always require such platform hooks.
1561 *
1562 * RETURN VALUE:
1563 * 0 is returned on success
1564 * -EINVAL is returned if device is not supposed to wake up the system
1565 * Error code depending on the platform is returned if both the platform and
1566 * the native mechanism fail to enable the generation of wake-up events
1567 */
1568int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1569 bool runtime, bool enable)
1570{
1571 int ret = 0;
1572
1573 if (enable && !runtime && !device_may_wakeup(&dev->dev))
1574 return -EINVAL;
1575
1576 /* Don't do the same thing twice in a row for one device. */
1577 if (!!enable == !!dev->wakeup_prepared)
1578 return 0;
1579
1580 /*
1581 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1582 * Anderson we should be doing PME# wake enable followed by ACPI wake
1583 * enable. To disable wake-up we call the platform first, for symmetry.
1584 */
1585
1586 if (enable) {
1587 int error;
1588
1589 if (pci_pme_capable(dev, state))
1590 pci_pme_active(dev, true);
1591 else
1592 ret = 1;
1593 error = runtime ? platform_pci_run_wake(dev, true) :
1594 platform_pci_sleep_wake(dev, true);
1595 if (ret)
1596 ret = error;
1597 if (!ret)
1598 dev->wakeup_prepared = true;
1599 } else {
1600 if (runtime)
1601 platform_pci_run_wake(dev, false);
1602 else
1603 platform_pci_sleep_wake(dev, false);
1604 pci_pme_active(dev, false);
1605 dev->wakeup_prepared = false;
1606 }
1607
1608 return ret;
1609}
1610EXPORT_SYMBOL(__pci_enable_wake);
1611
1612/**
1613 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1614 * @dev: PCI device to prepare
1615 * @enable: True to enable wake-up event generation; false to disable
1616 *
1617 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1618 * and this function allows them to set that up cleanly - pci_enable_wake()
1619 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1620 * ordering constraints.
1621 *
1622 * This function only returns error code if the device is not capable of
1623 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1624 * enable wake-up power for it.
1625 */
1626int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1627{
1628 return pci_pme_capable(dev, PCI_D3cold) ?
1629 pci_enable_wake(dev, PCI_D3cold, enable) :
1630 pci_enable_wake(dev, PCI_D3hot, enable);
1631}
1632
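/*
 * Illustrative sketch (not part of the original pci.c): a network-style
 * driver arming wake-up (e.g. Wake-on-LAN) before suspending.  Whether a
 * "wol_enabled" setting exists is an assumption of the example.
 */
#if 0
static void example_arm_wake(struct pci_dev *pdev, bool wol_enabled)
{
	if (pci_wake_from_d3(pdev, wol_enabled))
		dev_warn(&pdev->dev, "failed to %s wake-up from D3\n",
			 wol_enabled ? "enable" : "disable");
}
#endif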
1633/**
1634 * pci_target_state - find an appropriate low power state for a given PCI dev
1635 * @dev: PCI device
1636 *
1637 * Use underlying platform code to find a supported low power state for @dev.
1638 * If the platform can't manage @dev, return the deepest state from which it
1639 * can generate wake events, based on any available PME info.
1640 */
1641pci_power_t pci_target_state(struct pci_dev *dev)
1642{
1643 pci_power_t target_state = PCI_D3hot;
1644
1645 if (platform_pci_power_manageable(dev)) {
1646 /*
1647 * Call the platform to choose the target state of the device
1648 * and enable wake-up from this state if supported.
1649 */
1650 pci_power_t state = platform_pci_choose_state(dev);
1651
1652 switch (state) {
1653 case PCI_POWER_ERROR:
1654 case PCI_UNKNOWN:
1655 break;
1656 case PCI_D1:
1657 case PCI_D2:
1658 if (pci_no_d1d2(dev))
1659 break;
1660 default:
1661 target_state = state;
1662 }
1663 } else if (!dev->pm_cap) {
1664 target_state = PCI_D0;
1665 } else if (device_may_wakeup(&dev->dev)) {
1666 /*
1667 * Find the deepest state from which the device can generate
1668 * wake-up events, make it the target state and enable device
1669 * to generate PME#.
1670 */
1671 if (dev->pme_support) {
1672 while (target_state
1673 && !(dev->pme_support & (1 << target_state)))
1674 target_state--;
1675 }
1676 }
1677
1678 return target_state;
1679}
1680
1681/**
1682 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1683 * @dev: Device to handle.
1684 *
1685 * Choose the power state appropriate for the device depending on whether
1686 * it can wake up the system and/or is power manageable by the platform
1687 * (PCI_D3hot is the default) and put the device into that state.
1688 */
1689int pci_prepare_to_sleep(struct pci_dev *dev)
1690{
1691 pci_power_t target_state = pci_target_state(dev);
1692 int error;
1693
1694 if (target_state == PCI_POWER_ERROR)
1695 return -EIO;
1696
1697 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1698
1699 error = pci_set_power_state(dev, target_state);
1700
1701 if (error)
1702 pci_enable_wake(dev, target_state, false);
1703
1704 return error;
1705}
1706
1707/**
1708 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1709 * @dev: Device to handle.
1710 *
1711 * Disable device's system wake-up capability and put it into D0.
1712 */
1713int pci_back_from_sleep(struct pci_dev *dev)
1714{
1715 pci_enable_wake(dev, PCI_D0, false);
1716 return pci_set_power_state(dev, PCI_D0);
1717}
1718
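/*
 * Illustrative sketch (not part of the original pci.c): pairing
 * pci_prepare_to_sleep() on the way down with pci_back_from_sleep() on
 * the way up, as a bus-level suspend/resume helper might.  example_* is
 * hypothetical.
 */
#if 0
static int example_sleep(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);	/* picks target state, arms wake-up */
}

static int example_wake(struct pci_dev *pdev)
{
	int rc = pci_back_from_sleep(pdev);	/* disables wake-up, returns to D0 */

	pci_restore_state(pdev);
	return rc;
}
#endif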
1719/**
1720 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1721 * @dev: PCI device being suspended.
1722 *
1723 * Prepare @dev to generate wake-up events at run time and put it into a low
1724 * power state.
1725 */
1726int pci_finish_runtime_suspend(struct pci_dev *dev)
1727{
1728 pci_power_t target_state = pci_target_state(dev);
1729 int error;
1730
1731 if (target_state == PCI_POWER_ERROR)
1732 return -EIO;
1733
1734 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1735
1736 error = pci_set_power_state(dev, target_state);
1737
1738 if (error)
1739 __pci_enable_wake(dev, target_state, true, false);
1740
1741 return error;
1742}
1743
b67ea761
RW
1744/**
1745 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1746 * @dev: Device to check.
1747 *
 1748 * Return true if the device itself is capable of generating wake-up events
1749 * (through the platform or using the native PCIe PME) or if the device supports
1750 * PME and one of its upstream bridges can generate wake-up events.
1751 */
1752bool pci_dev_run_wake(struct pci_dev *dev)
1753{
1754 struct pci_bus *bus = dev->bus;
1755
1756 if (device_run_wake(&dev->dev))
1757 return true;
1758
1759 if (!dev->pme_support)
1760 return false;
1761
1762 while (bus->parent) {
1763 struct pci_dev *bridge = bus->self;
1764
1765 if (device_run_wake(&bridge->dev))
1766 return true;
1767
1768 bus = bus->parent;
1769 }
1770
1771 /* We have reached the root bus. */
1772 if (bus->bridge)
1773 return device_run_wake(bus->bridge);
1774
1775 return false;
1776}
1777EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1778
eb9d0fe4
RW
1779/**
1780 * pci_pm_init - Initialize PM functions of given PCI device
1781 * @dev: PCI device to handle.
1782 */
1783void pci_pm_init(struct pci_dev *dev)
1784{
1785 int pm;
1786 u16 pmc;
1da177e4 1787
bb910a70 1788 pm_runtime_forbid(&dev->dev);
a1e4d72c 1789 device_enable_async_suspend(&dev->dev);
e80bb09d 1790 dev->wakeup_prepared = false;
bb910a70 1791
337001b6
RW
1792 dev->pm_cap = 0;
1793
eb9d0fe4
RW
1794 /* find PCI PM capability in list */
1795 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1796 if (!pm)
50246dd4 1797 return;
eb9d0fe4
RW
1798 /* Check device's ability to generate PME# */
1799 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
075c1771 1800
eb9d0fe4
RW
1801 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1802 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1803 pmc & PCI_PM_CAP_VER_MASK);
50246dd4 1804 return;
eb9d0fe4
RW
1805 }
1806
337001b6 1807 dev->pm_cap = pm;
1ae861e6 1808 dev->d3_delay = PCI_PM_D3_WAIT;
337001b6
RW
1809
1810 dev->d1_support = false;
1811 dev->d2_support = false;
1812 if (!pci_no_d1d2(dev)) {
c9ed77ee 1813 if (pmc & PCI_PM_CAP_D1)
337001b6 1814 dev->d1_support = true;
c9ed77ee 1815 if (pmc & PCI_PM_CAP_D2)
337001b6 1816 dev->d2_support = true;
c9ed77ee
BH
1817
1818 if (dev->d1_support || dev->d2_support)
1819 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
ec84f126
JB
1820 dev->d1_support ? " D1" : "",
1821 dev->d2_support ? " D2" : "");
337001b6
RW
1822 }
1823
1824 pmc &= PCI_PM_CAP_PME_MASK;
1825 if (pmc) {
10c3d71d
BH
1826 dev_printk(KERN_DEBUG, &dev->dev,
1827 "PME# supported from%s%s%s%s%s\n",
c9ed77ee
BH
1828 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1829 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1830 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1831 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1832 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
337001b6 1833 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
379021d5 1834 dev->pme_poll = true;
eb9d0fe4
RW
1835 /*
1836 * Make device's PM flags reflect the wake-up capability, but
1837 * let the user space enable it to wake up the system as needed.
1838 */
1839 device_set_wakeup_capable(&dev->dev, true);
eb9d0fe4 1840 /* Disable the PME# generation functionality */
337001b6
RW
1841 pci_pme_active(dev, false);
1842 } else {
1843 dev->pme_support = 0;
eb9d0fe4 1844 }
1da177e4
LT
1845}
1846
eb9c39d0
JB
1847/**
1848 * platform_pci_wakeup_init - init platform wakeup if present
1849 * @dev: PCI device
1850 *
1851 * Some devices don't have PCI PM caps but can still generate wakeup
1852 * events through platform methods (like ACPI events). If @dev supports
1853 * platform wakeup events, set the device flag to indicate as much. This
1854 * may be redundant if the device also supports PCI PM caps, but double
1855 * initialization should be safe in that case.
1856 */
1857void platform_pci_wakeup_init(struct pci_dev *dev)
1858{
1859 if (!platform_pci_can_wakeup(dev))
1860 return;
1861
1862 device_set_wakeup_capable(&dev->dev, true);
eb9c39d0
JB
1863 platform_pci_sleep_wake(dev, false);
1864}
1865
63f4898a
RW
1866/**
 1867 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1868 * @dev: the PCI device
1869 * @cap: the capability to allocate the buffer for
1870 * @size: requested size of the buffer
1871 */
1872static int pci_add_cap_save_buffer(
1873 struct pci_dev *dev, char cap, unsigned int size)
1874{
1875 int pos;
1876 struct pci_cap_saved_state *save_state;
1877
1878 pos = pci_find_capability(dev, cap);
1879 if (pos <= 0)
1880 return 0;
1881
1882 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1883 if (!save_state)
1884 return -ENOMEM;
1885
24a4742f
AW
1886 save_state->cap.cap_nr = cap;
1887 save_state->cap.size = size;
63f4898a
RW
1888 pci_add_saved_cap(dev, save_state);
1889
1890 return 0;
1891}
1892
1893/**
1894 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1895 * @dev: the PCI device
1896 */
1897void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1898{
1899 int error;
1900
89858517
YZ
1901 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1902 PCI_EXP_SAVE_REGS * sizeof(u16));
63f4898a
RW
1903 if (error)
1904 dev_err(&dev->dev,
1905 "unable to preallocate PCI Express save buffer\n");
1906
1907 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1908 if (error)
1909 dev_err(&dev->dev,
1910 "unable to preallocate PCI-X save buffer\n");
1911}
1912
58c3a727
YZ
1913/**
 1914 * pci_enable_ari - enable ARI forwarding if the hardware supports it
1915 * @dev: the PCI device
1916 */
1917void pci_enable_ari(struct pci_dev *dev)
1918{
1919 int pos;
1920 u32 cap;
864d296c 1921 u16 flags, ctrl;
8113587c 1922 struct pci_dev *bridge;
58c3a727 1923
5f4d91a1 1924 if (!pci_is_pcie(dev) || dev->devfn)
58c3a727
YZ
1925 return;
1926
8113587c
ZY
1927 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1928 if (!pos)
58c3a727
YZ
1929 return;
1930
8113587c 1931 bridge = dev->bus->self;
5f4d91a1 1932 if (!bridge || !pci_is_pcie(bridge))
8113587c
ZY
1933 return;
1934
06a1cbaf 1935 pos = pci_pcie_cap(bridge);
58c3a727
YZ
1936 if (!pos)
1937 return;
1938
864d296c
CW
1939 /* ARI is a PCIe v2 feature */
1940 pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
1941 if ((flags & PCI_EXP_FLAGS_VERS) < 2)
1942 return;
1943
8113587c 1944 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
58c3a727
YZ
1945 if (!(cap & PCI_EXP_DEVCAP2_ARI))
1946 return;
1947
8113587c 1948 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
58c3a727 1949 ctrl |= PCI_EXP_DEVCTL2_ARI;
8113587c 1950 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
58c3a727 1951
8113587c 1952 bridge->ari_enabled = 1;
58c3a727
YZ
1953}
1954
b48d4425
JB
1955/**
1956 * pci_enable_ido - enable ID-based ordering on a device
1957 * @dev: the PCI device
1958 * @type: which types of IDO to enable
1959 *
1960 * Enable ID-based ordering on @dev. @type can contain the bits
1961 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
1962 * which types of transactions are allowed to be re-ordered.
1963 */
1964void pci_enable_ido(struct pci_dev *dev, unsigned long type)
1965{
1966 int pos;
1967 u16 ctrl;
1968
1969 pos = pci_pcie_cap(dev);
1970 if (!pos)
1971 return;
1972
1973 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1974 if (type & PCI_EXP_IDO_REQUEST)
1975 ctrl |= PCI_EXP_IDO_REQ_EN;
1976 if (type & PCI_EXP_IDO_COMPLETION)
1977 ctrl |= PCI_EXP_IDO_CMP_EN;
1978 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1979}
1980EXPORT_SYMBOL(pci_enable_ido);
1981
1982/**
1983 * pci_disable_ido - disable ID-based ordering on a device
1984 * @dev: the PCI device
1985 * @type: which types of IDO to disable
1986 */
1987void pci_disable_ido(struct pci_dev *dev, unsigned long type)
1988{
1989 int pos;
1990 u16 ctrl;
1991
1992 if (!pci_is_pcie(dev))
1993 return;
1994
1995 pos = pci_pcie_cap(dev);
1996 if (!pos)
1997 return;
1998
1999 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2000 if (type & PCI_EXP_IDO_REQUEST)
2001 ctrl &= ~PCI_EXP_IDO_REQ_EN;
2002 if (type & PCI_EXP_IDO_COMPLETION)
2003 ctrl &= ~PCI_EXP_IDO_CMP_EN;
2004 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2005}
2006EXPORT_SYMBOL(pci_disable_ido);
2007
48a92a81
JB
2008/**
2009 * pci_enable_obff - enable optimized buffer flush/fill
2010 * @dev: PCI device
2011 * @type: type of signaling to use
2012 *
2013 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
2014 * signaling if possible, falling back to message signaling only if
2015 * WAKE# isn't supported. @type should indicate whether the PCIe link
 2016 * should be brought out of L0s or L1 to send the message. It should be either
 2017 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2018 *
2019 * If your device can benefit from receiving all messages, even at the
2020 * power cost of bringing the link back up from a low power state, use
 2021 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2022 * preferred type).
2023 *
2024 * RETURNS:
2025 * Zero on success, appropriate error number on failure.
2026 */
2027int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2028{
2029 int pos;
2030 u32 cap;
2031 u16 ctrl;
2032 int ret;
2033
2034 if (!pci_is_pcie(dev))
2035 return -ENOTSUPP;
2036
2037 pos = pci_pcie_cap(dev);
2038 if (!pos)
2039 return -ENOTSUPP;
2040
2041 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2042 if (!(cap & PCI_EXP_OBFF_MASK))
2043 return -ENOTSUPP; /* no OBFF support at all */
2044
2045 /* Make sure the topology supports OBFF as well */
 2046 if (dev->bus && dev->bus->self) {
2047 ret = pci_enable_obff(dev->bus->self, type);
2048 if (ret)
2049 return ret;
2050 }
2051
2052 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2053 if (cap & PCI_EXP_OBFF_WAKE)
2054 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2055 else {
2056 switch (type) {
2057 case PCI_EXP_OBFF_SIGNAL_L0:
2058 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2059 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2060 break;
2061 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2062 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2063 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2064 break;
2065 default:
2066 WARN(1, "bad OBFF signal type\n");
2067 return -ENOTSUPP;
2068 }
2069 }
2070 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2071
2072 return 0;
2073}
2074EXPORT_SYMBOL(pci_enable_obff);
2075
2076/**
2077 * pci_disable_obff - disable optimized buffer flush/fill
2078 * @dev: PCI device
2079 *
2080 * Disable OBFF on @dev.
2081 */
2082void pci_disable_obff(struct pci_dev *dev)
2083{
2084 int pos;
2085 u16 ctrl;
2086
2087 if (!pci_is_pcie(dev))
2088 return;
2089
2090 pos = pci_pcie_cap(dev);
2091 if (!pos)
2092 return;
2093
2094 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2095 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2096 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2097}
2098EXPORT_SYMBOL(pci_disable_obff);
2099
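/*
 * Illustrative sketch (editorial addition): enabling OBFF with the
 * preferred L0 signal type and treating a lack of support as non-fatal.
 * The foo_ name is an assumption for this example.
 */
static void __maybe_unused foo_enable_obff(struct pci_dev *pdev)
{
        if (pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0))
                dev_info(&pdev->dev, "OBFF not enabled\n");
}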
51c2e0a7
JB
2100/**
2101 * pci_ltr_supported - check whether a device supports LTR
2102 * @dev: PCI device
2103 *
2104 * RETURNS:
2105 * True if @dev supports latency tolerance reporting, false otherwise.
2106 */
2107bool pci_ltr_supported(struct pci_dev *dev)
2108{
2109 int pos;
2110 u32 cap;
2111
2112 if (!pci_is_pcie(dev))
2113 return false;
2114
2115 pos = pci_pcie_cap(dev);
2116 if (!pos)
2117 return false;
2118
2119 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2120
2121 return cap & PCI_EXP_DEVCAP2_LTR;
2122}
2123EXPORT_SYMBOL(pci_ltr_supported);
2124
2125/**
2126 * pci_enable_ltr - enable latency tolerance reporting
2127 * @dev: PCI device
2128 *
2129 * Enable LTR on @dev if possible, which means enabling it first on
2130 * upstream ports.
2131 *
2132 * RETURNS:
2133 * Zero on success, errno on failure.
2134 */
2135int pci_enable_ltr(struct pci_dev *dev)
2136{
2137 int pos;
2138 u16 ctrl;
2139 int ret;
2140
2141 if (!pci_ltr_supported(dev))
2142 return -ENOTSUPP;
2143
2144 pos = pci_pcie_cap(dev);
2145 if (!pos)
2146 return -ENOTSUPP;
2147
2148 /* Only primary function can enable/disable LTR */
2149 if (PCI_FUNC(dev->devfn) != 0)
2150 return -EINVAL;
2151
2152 /* Enable upstream ports first */
 2153 if (dev->bus && dev->bus->self) {
2154 ret = pci_enable_ltr(dev->bus->self);
2155 if (ret)
2156 return ret;
2157 }
2158
2159 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2160 ctrl |= PCI_EXP_LTR_EN;
2161 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2162
2163 return 0;
2164}
2165EXPORT_SYMBOL(pci_enable_ltr);
2166
2167/**
2168 * pci_disable_ltr - disable latency tolerance reporting
2169 * @dev: PCI device
2170 */
2171void pci_disable_ltr(struct pci_dev *dev)
2172{
2173 int pos;
2174 u16 ctrl;
2175
2176 if (!pci_ltr_supported(dev))
2177 return;
2178
2179 pos = pci_pcie_cap(dev);
2180 if (!pos)
2181 return;
2182
2183 /* Only primary function can enable/disable LTR */
2184 if (PCI_FUNC(dev->devfn) != 0)
2185 return;
2186
2187 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2188 ctrl &= ~PCI_EXP_LTR_EN;
2189 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2190}
2191EXPORT_SYMBOL(pci_disable_ltr);
2192
2193static int __pci_ltr_scale(int *val)
2194{
2195 int scale = 0;
2196
2197 while (*val > 1023) {
2198 *val = (*val + 31) / 32;
2199 scale++;
2200 }
2201 return scale;
2202}
2203
2204/**
2205 * pci_set_ltr - set LTR latency values
2206 * @dev: PCI device
2207 * @snoop_lat_ns: snoop latency in nanoseconds
2208 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2209 *
2210 * Figure out the scale and set the LTR values accordingly.
2211 */
2212int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2213{
2214 int pos, ret, snoop_scale, nosnoop_scale;
2215 u16 val;
2216
2217 if (!pci_ltr_supported(dev))
2218 return -ENOTSUPP;
2219
2220 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2221 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2222
2223 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2224 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2225 return -EINVAL;
2226
2227 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2228 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2229 return -EINVAL;
2230
2231 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2232 if (!pos)
2233 return -ENOTSUPP;
2234
2235 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2236 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2237 if (ret != 4)
2238 return -EIO;
2239
2240 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2241 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2242 if (ret != 4)
2243 return -EIO;
2244
2245 return 0;
2246}
2247EXPORT_SYMBOL(pci_set_ltr);
2248
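/*
 * Illustrative sketch (editorial addition): a device that can tolerate
 * roughly 1 ms of snoop latency and 3 ms of no-snoop latency might be
 * programmed as below; the latency figures are invented for the example
 * and the scale bits are derived internally by __pci_ltr_scale().
 */
static void __maybe_unused foo_program_ltr(struct pci_dev *pdev)
{
        if (pci_enable_ltr(pdev))
                return;         /* LTR not supported anywhere upstream */

        if (pci_set_ltr(pdev, 1000000, 3000000))
                dev_warn(&pdev->dev, "failed to set LTR latencies\n");
}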
5d990b62
CW
2249static int pci_acs_enable;
2250
2251/**
2252 * pci_request_acs - ask for ACS to be enabled if supported
2253 */
2254void pci_request_acs(void)
2255{
2256 pci_acs_enable = 1;
2257}
2258
ae21ee65
AK
2259/**
 2260 * pci_enable_acs - enable ACS if the hardware supports it
2261 * @dev: the PCI device
2262 */
2263void pci_enable_acs(struct pci_dev *dev)
2264{
2265 int pos;
2266 u16 cap;
2267 u16 ctrl;
2268
5d990b62
CW
2269 if (!pci_acs_enable)
2270 return;
2271
5f4d91a1 2272 if (!pci_is_pcie(dev))
ae21ee65
AK
2273 return;
2274
2275 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2276 if (!pos)
2277 return;
2278
2279 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2280 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2281
2282 /* Source Validation */
2283 ctrl |= (cap & PCI_ACS_SV);
2284
2285 /* P2P Request Redirect */
2286 ctrl |= (cap & PCI_ACS_RR);
2287
2288 /* P2P Completion Redirect */
2289 ctrl |= (cap & PCI_ACS_CR);
2290
2291 /* Upstream Forwarding */
2292 ctrl |= (cap & PCI_ACS_UF);
2293
2294 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2295}
2296
57c2cf71
BH
2297/**
2298 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2299 * @dev: the PCI device
 2300 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2301 *
2302 * Perform INTx swizzling for a device behind one level of bridge. This is
2303 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
46b952a3
MW
2304 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2305 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2306 * the PCI Express Base Specification, Revision 2.1)
57c2cf71
BH
2307 */
2308u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2309{
46b952a3
MW
2310 int slot;
2311
2312 if (pci_ari_enabled(dev->bus))
2313 slot = 0;
2314 else
2315 slot = PCI_SLOT(dev->devfn);
2316
2317 return (((pin - 1) + slot) % 4) + 1;
57c2cf71
BH
2318}
2319
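/*
 * Worked example (editorial addition): a device in slot 2 that asserts
 * INTB (pin 2) is seen one level up as (((2 - 1) + 2) % 4) + 1 = 4,
 * i.e. INTD.
 */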
1da177e4
LT
2320int
2321pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2322{
2323 u8 pin;
2324
514d207d 2325 pin = dev->pin;
1da177e4
LT
2326 if (!pin)
2327 return -1;
878f2e50 2328
8784fd4d 2329 while (!pci_is_root_bus(dev->bus)) {
57c2cf71 2330 pin = pci_swizzle_interrupt_pin(dev, pin);
1da177e4
LT
2331 dev = dev->bus->self;
2332 }
2333 *bridge = dev;
2334 return pin;
2335}
2336
68feac87
BH
2337/**
2338 * pci_common_swizzle - swizzle INTx all the way to root bridge
2339 * @dev: the PCI device
 2340 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2341 *
2342 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2343 * bridges all the way up to a PCI root bus.
2344 */
2345u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2346{
2347 u8 pin = *pinp;
2348
1eb39487 2349 while (!pci_is_root_bus(dev->bus)) {
68feac87
BH
2350 pin = pci_swizzle_interrupt_pin(dev, pin);
2351 dev = dev->bus->self;
2352 }
2353 *pinp = pin;
2354 return PCI_SLOT(dev->devfn);
2355}
2356
1da177e4
LT
2357/**
 2358 * pci_release_region - Release a PCI BAR
2359 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2360 * @bar: BAR to release
2361 *
2362 * Releases the PCI I/O and memory resources previously reserved by a
2363 * successful call to pci_request_region. Call this function only
2364 * after all use of the PCI regions has ceased.
2365 */
2366void pci_release_region(struct pci_dev *pdev, int bar)
2367{
9ac7849e
TH
2368 struct pci_devres *dr;
2369
1da177e4
LT
2370 if (pci_resource_len(pdev, bar) == 0)
2371 return;
2372 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2373 release_region(pci_resource_start(pdev, bar),
2374 pci_resource_len(pdev, bar));
2375 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2376 release_mem_region(pci_resource_start(pdev, bar),
2377 pci_resource_len(pdev, bar));
9ac7849e
TH
2378
2379 dr = find_pci_dr(pdev);
2380 if (dr)
2381 dr->region_mask &= ~(1 << bar);
1da177e4
LT
2382}
2383
2384/**
f5ddcac4 2385 * __pci_request_region - Reserve PCI I/O and memory resource
1da177e4
LT
2386 * @pdev: PCI device whose resources are to be reserved
2387 * @bar: BAR to be reserved
2388 * @res_name: Name to be associated with resource.
f5ddcac4 2389 * @exclusive: whether the region access is exclusive or not
1da177e4
LT
2390 *
 2391 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2392 * being reserved by owner @res_name. Do not access any
2393 * address inside the PCI regions unless this call returns
2394 * successfully.
2395 *
f5ddcac4
RD
2396 * If @exclusive is set, then the region is marked so that userspace
2397 * is explicitly not allowed to map the resource via /dev/mem or
2398 * sysfs MMIO access.
2399 *
1da177e4
LT
2400 * Returns 0 on success, or %EBUSY on error. A warning
2401 * message is also printed on failure.
2402 */
e8de1481
AV
2403static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2404 int exclusive)
1da177e4 2405{
9ac7849e
TH
2406 struct pci_devres *dr;
2407
1da177e4
LT
2408 if (pci_resource_len(pdev, bar) == 0)
2409 return 0;
2410
2411 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2412 if (!request_region(pci_resource_start(pdev, bar),
2413 pci_resource_len(pdev, bar), res_name))
2414 goto err_out;
2415 }
2416 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
e8de1481
AV
2417 if (!__request_mem_region(pci_resource_start(pdev, bar),
2418 pci_resource_len(pdev, bar), res_name,
2419 exclusive))
1da177e4
LT
2420 goto err_out;
2421 }
9ac7849e
TH
2422
2423 dr = find_pci_dr(pdev);
2424 if (dr)
2425 dr->region_mask |= 1 << bar;
2426
1da177e4
LT
2427 return 0;
2428
2429err_out:
c7dabef8 2430 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
096e6f67 2431 &pdev->resource[bar]);
1da177e4
LT
2432 return -EBUSY;
2433}
2434
e8de1481 2435/**
f5ddcac4 2436 * pci_request_region - Reserve PCI I/O and memory resource
e8de1481
AV
2437 * @pdev: PCI device whose resources are to be reserved
2438 * @bar: BAR to be reserved
f5ddcac4 2439 * @res_name: Name to be associated with resource
e8de1481 2440 *
f5ddcac4 2441 * Mark the PCI region associated with PCI device @pdev BAR @bar as
e8de1481
AV
2442 * being reserved by owner @res_name. Do not access any
2443 * address inside the PCI regions unless this call returns
2444 * successfully.
2445 *
2446 * Returns 0 on success, or %EBUSY on error. A warning
2447 * message is also printed on failure.
2448 */
2449int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2450{
2451 return __pci_request_region(pdev, bar, res_name, 0);
2452}
2453
2454/**
 2455 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2456 * @pdev: PCI device whose resources are to be reserved
2457 * @bar: BAR to be reserved
2458 * @res_name: Name to be associated with resource.
2459 *
 2460 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2461 * being reserved by owner @res_name. Do not access any
2462 * address inside the PCI regions unless this call returns
2463 * successfully.
2464 *
2465 * Returns 0 on success, or %EBUSY on error. A warning
2466 * message is also printed on failure.
2467 *
 2468 * The key difference that _exclusive makes is that userspace is
2469 * explicitly not allowed to map the resource via /dev/mem or
2470 * sysfs.
2471 */
2472int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2473{
2474 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2475}
c87deff7
HS
2476/**
2477 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2478 * @pdev: PCI device whose resources were previously reserved
2479 * @bars: Bitmask of BARs to be released
2480 *
2481 * Release selected PCI I/O and memory resources previously reserved.
2482 * Call this function only after all use of the PCI regions has ceased.
2483 */
2484void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2485{
2486 int i;
2487
2488 for (i = 0; i < 6; i++)
2489 if (bars & (1 << i))
2490 pci_release_region(pdev, i);
2491}
2492
e8de1481
AV
2493int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2494 const char *res_name, int excl)
c87deff7
HS
2495{
2496 int i;
2497
2498 for (i = 0; i < 6; i++)
2499 if (bars & (1 << i))
e8de1481 2500 if (__pci_request_region(pdev, i, res_name, excl))
c87deff7
HS
2501 goto err_out;
2502 return 0;
2503
2504err_out:
2505 while(--i >= 0)
2506 if (bars & (1 << i))
2507 pci_release_region(pdev, i);
2508
2509 return -EBUSY;
2510}
1da177e4 2511
e8de1481
AV
2512
2513/**
2514 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2515 * @pdev: PCI device whose resources are to be reserved
2516 * @bars: Bitmask of BARs to be requested
2517 * @res_name: Name to be associated with resource
2518 */
2519int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2520 const char *res_name)
2521{
2522 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2523}
2524
2525int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2526 int bars, const char *res_name)
2527{
2528 return __pci_request_selected_regions(pdev, bars, res_name,
2529 IORESOURCE_EXCLUSIVE);
2530}
2531
1da177e4
LT
2532/**
2533 * pci_release_regions - Release reserved PCI I/O and memory resources
2534 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2535 *
2536 * Releases all PCI I/O and memory resources previously reserved by a
2537 * successful call to pci_request_regions. Call this function only
2538 * after all use of the PCI regions has ceased.
2539 */
2540
2541void pci_release_regions(struct pci_dev *pdev)
2542{
c87deff7 2543 pci_release_selected_regions(pdev, (1 << 6) - 1);
1da177e4
LT
2544}
2545
2546/**
 2547 * pci_request_regions - Reserve PCI I/O and memory resources
2548 * @pdev: PCI device whose resources are to be reserved
2549 * @res_name: Name to be associated with resource.
2550 *
2551 * Mark all PCI regions associated with PCI device @pdev as
2552 * being reserved by owner @res_name. Do not access any
2553 * address inside the PCI regions unless this call returns
2554 * successfully.
2555 *
2556 * Returns 0 on success, or %EBUSY on error. A warning
2557 * message is also printed on failure.
2558 */
3c990e92 2559int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1da177e4 2560{
c87deff7 2561 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1da177e4
LT
2562}
2563
e8de1481
AV
2564/**
 2565 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2566 * @pdev: PCI device whose resources are to be reserved
2567 * @res_name: Name to be associated with resource.
2568 *
2569 * Mark all PCI regions associated with PCI device @pdev as
2570 * being reserved by owner @res_name. Do not access any
2571 * address inside the PCI regions unless this call returns
2572 * successfully.
2573 *
2574 * pci_request_regions_exclusive() will mark the region so that
2575 * /dev/mem and the sysfs MMIO access will not be allowed.
2576 *
2577 * Returns 0 on success, or %EBUSY on error. A warning
2578 * message is also printed on failure.
2579 */
2580int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2581{
2582 return pci_request_selected_regions_exclusive(pdev,
2583 ((1 << 6) - 1), res_name);
2584}
2585
6a479079
BH
2586static void __pci_set_master(struct pci_dev *dev, bool enable)
2587{
2588 u16 old_cmd, cmd;
2589
2590 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2591 if (enable)
2592 cmd = old_cmd | PCI_COMMAND_MASTER;
2593 else
2594 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2595 if (cmd != old_cmd) {
2596 dev_dbg(&dev->dev, "%s bus mastering\n",
2597 enable ? "enabling" : "disabling");
2598 pci_write_config_word(dev, PCI_COMMAND, cmd);
2599 }
2600 dev->is_busmaster = enable;
2601}
e8de1481 2602
96c55900
MS
2603/**
2604 * pcibios_set_master - enable PCI bus-mastering for device dev
2605 * @dev: the PCI device to enable
2606 *
2607 * Enables PCI bus-mastering for the device. This is the default
2608 * implementation. Architecture specific implementations can override
2609 * this if necessary.
2610 */
2611void __weak pcibios_set_master(struct pci_dev *dev)
2612{
2613 u8 lat;
2614
f676678f
MS
2615 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2616 if (pci_is_pcie(dev))
2617 return;
2618
96c55900
MS
2619 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2620 if (lat < 16)
2621 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2622 else if (lat > pcibios_max_latency)
2623 lat = pcibios_max_latency;
2624 else
2625 return;
2626 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2627 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2628}
2629
1da177e4
LT
2630/**
2631 * pci_set_master - enables bus-mastering for device dev
2632 * @dev: the PCI device to enable
2633 *
2634 * Enables bus-mastering on the device and calls pcibios_set_master()
2635 * to do the needed arch specific settings.
2636 */
6a479079 2637void pci_set_master(struct pci_dev *dev)
1da177e4 2638{
6a479079 2639 __pci_set_master(dev, true);
1da177e4
LT
2640 pcibios_set_master(dev);
2641}
2642
6a479079
BH
2643/**
2644 * pci_clear_master - disables bus-mastering for device dev
2645 * @dev: the PCI device to disable
2646 */
2647void pci_clear_master(struct pci_dev *dev)
2648{
2649 __pci_set_master(dev, false);
2650}
2651
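/*
 * Illustrative sketch (editorial addition): the usual probe-time ordering
 * of pci_enable_device(), pci_request_regions() and pci_set_master().
 * The foo_ name and the "foo" resource name are assumptions made for
 * this example.
 */
static int __maybe_unused foo_probe_sketch(struct pci_dev *pdev)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        err = pci_request_regions(pdev, "foo");
        if (err) {
                pci_disable_device(pdev);
                return err;
        }

        pci_set_master(pdev);
        return 0;
}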
1da177e4 2652/**
edb2d97e
MW
2653 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2654 * @dev: the PCI device for which MWI is to be enabled
1da177e4 2655 *
edb2d97e
MW
2656 * Helper function for pci_set_mwi.
2657 * Originally copied from drivers/net/acenic.c.
1da177e4
LT
2658 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2659 *
2660 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2661 */
15ea76d4 2662int pci_set_cacheline_size(struct pci_dev *dev)
1da177e4
LT
2663{
2664 u8 cacheline_size;
2665
2666 if (!pci_cache_line_size)
15ea76d4 2667 return -EINVAL;
1da177e4
LT
2668
2669 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2670 equal to or multiple of the right value. */
2671 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2672 if (cacheline_size >= pci_cache_line_size &&
2673 (cacheline_size % pci_cache_line_size) == 0)
2674 return 0;
2675
2676 /* Write the correct value. */
2677 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2678 /* Read it back. */
2679 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2680 if (cacheline_size == pci_cache_line_size)
2681 return 0;
2682
80ccba11
BH
2683 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2684 "supported\n", pci_cache_line_size << 2);
1da177e4
LT
2685
2686 return -EINVAL;
2687}
15ea76d4
TH
2688EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2689
2690#ifdef PCI_DISABLE_MWI
2691int pci_set_mwi(struct pci_dev *dev)
2692{
2693 return 0;
2694}
2695
2696int pci_try_set_mwi(struct pci_dev *dev)
2697{
2698 return 0;
2699}
2700
2701void pci_clear_mwi(struct pci_dev *dev)
2702{
2703}
2704
2705#else
1da177e4
LT
2706
2707/**
2708 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2709 * @dev: the PCI device for which MWI is enabled
2710 *
694625c0 2711 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1da177e4
LT
2712 *
2713 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2714 */
2715int
2716pci_set_mwi(struct pci_dev *dev)
2717{
2718 int rc;
2719 u16 cmd;
2720
edb2d97e 2721 rc = pci_set_cacheline_size(dev);
1da177e4
LT
2722 if (rc)
2723 return rc;
2724
2725 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2726 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
80ccba11 2727 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1da177e4
LT
2728 cmd |= PCI_COMMAND_INVALIDATE;
2729 pci_write_config_word(dev, PCI_COMMAND, cmd);
2730 }
2731
2732 return 0;
2733}
2734
694625c0
RD
2735/**
2736 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2737 * @dev: the PCI device for which MWI is enabled
2738 *
2739 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2740 * Callers are not required to check the return value.
2741 *
2742 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2743 */
2744int pci_try_set_mwi(struct pci_dev *dev)
2745{
2746 int rc = pci_set_mwi(dev);
2747 return rc;
2748}
2749
1da177e4
LT
2750/**
2751 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2752 * @dev: the PCI device to disable
2753 *
2754 * Disables PCI Memory-Write-Invalidate transaction on the device
2755 */
2756void
2757pci_clear_mwi(struct pci_dev *dev)
2758{
2759 u16 cmd;
2760
2761 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2762 if (cmd & PCI_COMMAND_INVALIDATE) {
2763 cmd &= ~PCI_COMMAND_INVALIDATE;
2764 pci_write_config_word(dev, PCI_COMMAND, cmd);
2765 }
2766}
edb2d97e 2767#endif /* ! PCI_DISABLE_MWI */
1da177e4 2768
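/*
 * Illustrative sketch (editorial addition): pci_try_set_mwi() is meant for
 * callers that treat Memory-Write-Invalidate purely as an optimisation,
 * so its return value may be ignored.
 */
static void __maybe_unused foo_try_mwi(struct pci_dev *pdev)
{
        pci_try_set_mwi(pdev);  /* best effort; failure is not fatal */
}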
a04ce0ff
BR
2769/**
2770 * pci_intx - enables/disables PCI INTx for device dev
8f7020d3
RD
2771 * @pdev: the PCI device to operate on
2772 * @enable: boolean: whether to enable or disable PCI INTx
a04ce0ff
BR
2773 *
2774 * Enables/disables PCI INTx for device dev
2775 */
2776void
2777pci_intx(struct pci_dev *pdev, int enable)
2778{
2779 u16 pci_command, new;
2780
2781 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2782
2783 if (enable) {
2784 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2785 } else {
2786 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2787 }
2788
2789 if (new != pci_command) {
9ac7849e
TH
2790 struct pci_devres *dr;
2791
2fd9d74b 2792 pci_write_config_word(pdev, PCI_COMMAND, new);
9ac7849e
TH
2793
2794 dr = find_pci_dr(pdev);
2795 if (dr && !dr->restore_intx) {
2796 dr->restore_intx = 1;
2797 dr->orig_intx = !enable;
2798 }
a04ce0ff
BR
2799 }
2800}
2801
a2e27787
JK
2802/**
2803 * pci_intx_mask_supported - probe for INTx masking support
 2804 * @dev: the PCI device to operate on
 2805 *
 2806 * Check whether the device @dev supports INTx masking via the config space
2807 * command word.
2808 */
2809bool pci_intx_mask_supported(struct pci_dev *dev)
2810{
2811 bool mask_supported = false;
2812 u16 orig, new;
2813
2814 pci_cfg_access_lock(dev);
2815
2816 pci_read_config_word(dev, PCI_COMMAND, &orig);
2817 pci_write_config_word(dev, PCI_COMMAND,
2818 orig ^ PCI_COMMAND_INTX_DISABLE);
2819 pci_read_config_word(dev, PCI_COMMAND, &new);
2820
2821 /*
2822 * There's no way to protect against hardware bugs or detect them
2823 * reliably, but as long as we know what the value should be, let's
2824 * go ahead and check it.
2825 */
2826 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2827 dev_err(&dev->dev, "Command register changed from "
2828 "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2829 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2830 mask_supported = true;
2831 pci_write_config_word(dev, PCI_COMMAND, orig);
2832 }
2833
2834 pci_cfg_access_unlock(dev);
2835 return mask_supported;
2836}
2837EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2838
2839static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2840{
2841 struct pci_bus *bus = dev->bus;
2842 bool mask_updated = true;
2843 u32 cmd_status_dword;
2844 u16 origcmd, newcmd;
2845 unsigned long flags;
2846 bool irq_pending;
2847
2848 /*
2849 * We do a single dword read to retrieve both command and status.
2850 * Document assumptions that make this possible.
2851 */
2852 BUILD_BUG_ON(PCI_COMMAND % 4);
2853 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2854
2855 raw_spin_lock_irqsave(&pci_lock, flags);
2856
2857 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2858
2859 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2860
2861 /*
2862 * Check interrupt status register to see whether our device
2863 * triggered the interrupt (when masking) or the next IRQ is
2864 * already pending (when unmasking).
2865 */
2866 if (mask != irq_pending) {
2867 mask_updated = false;
2868 goto done;
2869 }
2870
2871 origcmd = cmd_status_dword;
2872 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2873 if (mask)
2874 newcmd |= PCI_COMMAND_INTX_DISABLE;
2875 if (newcmd != origcmd)
2876 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2877
2878done:
2879 raw_spin_unlock_irqrestore(&pci_lock, flags);
2880
2881 return mask_updated;
2882}
2883
2884/**
2885 * pci_check_and_mask_intx - mask INTx on pending interrupt
 2886 * @dev: the PCI device to operate on
 2887 *
 2888 * Check if the device @dev has its INTx line asserted, mask it and
 2889 * return true in that case. False is returned if no interrupt was
2890 * pending.
2891 */
2892bool pci_check_and_mask_intx(struct pci_dev *dev)
2893{
2894 return pci_check_and_set_intx_mask(dev, true);
2895}
2896EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2897
2898/**
 2899 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 2900 * @dev: the PCI device to operate on
 2901 *
 2902 * Check if the device @dev has its INTx line asserted, unmask it if not
2903 * and return true. False is returned and the mask remains active if
2904 * there was still an interrupt pending.
2905 */
2906bool pci_check_and_unmask_intx(struct pci_dev *dev)
2907{
2908 return pci_check_and_set_intx_mask(dev, false);
2909}
2910EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
2911
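/*
 * Illustrative sketch (editorial addition): a hypothetical driver sharing
 * a level-triggered INTx line could mask it from the hard IRQ handler and
 * re-enable it later from its bottom half.  The foo_ name is an assumption.
 */
static irqreturn_t __maybe_unused foo_intx_handler(int irq, void *data)
{
        struct pci_dev *pdev = data;

        if (!pci_check_and_mask_intx(pdev))
                return IRQ_NONE;        /* not our interrupt */

        return IRQ_HANDLED;             /* real work deferred elsewhere */
}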
f5f2b131
EB
2912/**
 2913 * pci_msi_off - disables any MSI or MSI-X capabilities
8d7d86e9 2914 * @dev: the PCI device to operate on
f5f2b131
EB
2915 *
 2916 * If you want to use MSI, see pci_enable_msi() and friends.
 2917 * This is a lower-level primitive that allows us to disable
 2918 * MSI operation at the device level.
2919 */
2920void pci_msi_off(struct pci_dev *dev)
2921{
2922 int pos;
2923 u16 control;
2924
2925 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2926 if (pos) {
2927 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2928 control &= ~PCI_MSI_FLAGS_ENABLE;
2929 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2930 }
2931 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2932 if (pos) {
2933 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2934 control &= ~PCI_MSIX_FLAGS_ENABLE;
2935 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
2936 }
2937}
b03214d5 2938EXPORT_SYMBOL_GPL(pci_msi_off);
f5f2b131 2939
4d57cdfa
FT
2940int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
2941{
2942 return dma_set_max_seg_size(&dev->dev, size);
2943}
2944EXPORT_SYMBOL(pci_set_dma_max_seg_size);
4d57cdfa 2945
59fc67de
FT
2946int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
2947{
2948 return dma_set_seg_boundary(&dev->dev, mask);
2949}
2950EXPORT_SYMBOL(pci_set_dma_seg_boundary);
59fc67de 2951
8c1c699f 2952static int pcie_flr(struct pci_dev *dev, int probe)
8dd7f803 2953{
8c1c699f
YZ
2954 int i;
2955 int pos;
8dd7f803 2956 u32 cap;
04b55c47 2957 u16 status, control;
8dd7f803 2958
06a1cbaf 2959 pos = pci_pcie_cap(dev);
8c1c699f 2960 if (!pos)
8dd7f803 2961 return -ENOTTY;
8c1c699f
YZ
2962
2963 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
8dd7f803
SY
2964 if (!(cap & PCI_EXP_DEVCAP_FLR))
2965 return -ENOTTY;
2966
d91cdc74
SY
2967 if (probe)
2968 return 0;
2969
8dd7f803 2970 /* Wait for Transaction Pending bit to clear */
8c1c699f
YZ
2971 for (i = 0; i < 4; i++) {
2972 if (i)
2973 msleep((1 << (i - 1)) * 100);
5fe5db05 2974
8c1c699f
YZ
2975 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
2976 if (!(status & PCI_EXP_DEVSTA_TRPND))
2977 goto clear;
2978 }
2979
2980 dev_err(&dev->dev, "transaction is not cleared; "
2981 "proceeding with reset anyway\n");
2982
2983clear:
04b55c47
SR
2984 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
2985 control |= PCI_EXP_DEVCTL_BCR_FLR;
2986 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
2987
8c1c699f 2988 msleep(100);
8dd7f803 2989
8dd7f803
SY
2990 return 0;
2991}
d91cdc74 2992
8c1c699f 2993static int pci_af_flr(struct pci_dev *dev, int probe)
1ca88797 2994{
8c1c699f
YZ
2995 int i;
2996 int pos;
1ca88797 2997 u8 cap;
8c1c699f 2998 u8 status;
1ca88797 2999
8c1c699f
YZ
3000 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3001 if (!pos)
1ca88797 3002 return -ENOTTY;
8c1c699f
YZ
3003
3004 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
1ca88797
SY
3005 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3006 return -ENOTTY;
3007
3008 if (probe)
3009 return 0;
3010
1ca88797 3011 /* Wait for Transaction Pending bit to clear */
8c1c699f
YZ
3012 for (i = 0; i < 4; i++) {
3013 if (i)
3014 msleep((1 << (i - 1)) * 100);
3015
3016 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3017 if (!(status & PCI_AF_STATUS_TP))
3018 goto clear;
3019 }
5fe5db05 3020
8c1c699f
YZ
3021 dev_err(&dev->dev, "transaction is not cleared; "
3022 "proceeding with reset anyway\n");
5fe5db05 3023
8c1c699f
YZ
3024clear:
3025 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
1ca88797 3026 msleep(100);
8c1c699f 3027
1ca88797
SY
3028 return 0;
3029}
3030
83d74e03
RW
3031/**
3032 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3033 * @dev: Device to reset.
3034 * @probe: If set, only check if the device can be reset this way.
3035 *
3036 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3037 * unset, it will be reinitialized internally when going from PCI_D3hot to
3038 * PCI_D0. If that's the case and the device is not in a low-power state
3039 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3040 *
3041 * NOTE: This causes the caller to sleep for twice the device power transition
3042 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 3043 * by default (i.e. unless the @dev's d3_delay field has a different value).
3044 * Moreover, only devices in D0 can be reset by this function.
3045 */
f85876ba 3046static int pci_pm_reset(struct pci_dev *dev, int probe)
d91cdc74 3047{
f85876ba
YZ
3048 u16 csr;
3049
3050 if (!dev->pm_cap)
3051 return -ENOTTY;
d91cdc74 3052
f85876ba
YZ
3053 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3054 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3055 return -ENOTTY;
d91cdc74 3056
f85876ba
YZ
3057 if (probe)
3058 return 0;
1ca88797 3059
f85876ba
YZ
3060 if (dev->current_state != PCI_D0)
3061 return -EINVAL;
3062
3063 csr &= ~PCI_PM_CTRL_STATE_MASK;
3064 csr |= PCI_D3hot;
3065 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3066 pci_dev_d3_sleep(dev);
f85876ba
YZ
3067
3068 csr &= ~PCI_PM_CTRL_STATE_MASK;
3069 csr |= PCI_D0;
3070 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3071 pci_dev_d3_sleep(dev);
f85876ba
YZ
3072
3073 return 0;
3074}
3075
c12ff1df
YZ
3076static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3077{
3078 u16 ctrl;
3079 struct pci_dev *pdev;
3080
654b75e0 3081 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
c12ff1df
YZ
3082 return -ENOTTY;
3083
3084 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3085 if (pdev != dev)
3086 return -ENOTTY;
3087
3088 if (probe)
3089 return 0;
3090
3091 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3092 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3093 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3094 msleep(100);
3095
3096 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3097 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3098 msleep(100);
3099
3100 return 0;
3101}
3102
8c1c699f 3103static int pci_dev_reset(struct pci_dev *dev, int probe)
d91cdc74 3104{
8c1c699f
YZ
3105 int rc;
3106
3107 might_sleep();
3108
3109 if (!probe) {
fb51ccbf 3110 pci_cfg_access_lock(dev);
8c1c699f 3111 /* block PM suspend, driver probe, etc. */
8e9394ce 3112 device_lock(&dev->dev);
8c1c699f 3113 }
d91cdc74 3114
b9c3b266
DC
3115 rc = pci_dev_specific_reset(dev, probe);
3116 if (rc != -ENOTTY)
3117 goto done;
3118
8c1c699f
YZ
3119 rc = pcie_flr(dev, probe);
3120 if (rc != -ENOTTY)
3121 goto done;
d91cdc74 3122
8c1c699f 3123 rc = pci_af_flr(dev, probe);
f85876ba
YZ
3124 if (rc != -ENOTTY)
3125 goto done;
3126
3127 rc = pci_pm_reset(dev, probe);
c12ff1df
YZ
3128 if (rc != -ENOTTY)
3129 goto done;
3130
3131 rc = pci_parent_bus_reset(dev, probe);
8c1c699f
YZ
3132done:
3133 if (!probe) {
8e9394ce 3134 device_unlock(&dev->dev);
fb51ccbf 3135 pci_cfg_access_unlock(dev);
8c1c699f 3136 }
1ca88797 3137
8c1c699f 3138 return rc;
d91cdc74
SY
3139}
3140
3141/**
8c1c699f
YZ
3142 * __pci_reset_function - reset a PCI device function
3143 * @dev: PCI device to reset
d91cdc74
SY
3144 *
3145 * Some devices allow an individual function to be reset without affecting
3146 * other functions in the same device. The PCI device must be responsive
3147 * to PCI config space in order to use this function.
3148 *
3149 * The device function is presumed to be unused when this function is called.
3150 * Resetting the device will make the contents of PCI configuration space
3151 * random, so any caller of this must be prepared to reinitialise the
3152 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3153 * etc.
3154 *
8c1c699f 3155 * Returns 0 if the device function was successfully reset or negative if the
d91cdc74
SY
3156 * device doesn't support resetting a single function.
3157 */
8c1c699f 3158int __pci_reset_function(struct pci_dev *dev)
d91cdc74 3159{
8c1c699f 3160 return pci_dev_reset(dev, 0);
d91cdc74 3161}
8c1c699f 3162EXPORT_SYMBOL_GPL(__pci_reset_function);
8dd7f803 3163
711d5779
MT
3164/**
3165 * pci_probe_reset_function - check whether the device can be safely reset
3166 * @dev: PCI device to reset
3167 *
3168 * Some devices allow an individual function to be reset without affecting
3169 * other functions in the same device. The PCI device must be responsive
3170 * to PCI config space in order to use this function.
3171 *
3172 * Returns 0 if the device function can be reset or negative if the
3173 * device doesn't support resetting a single function.
3174 */
3175int pci_probe_reset_function(struct pci_dev *dev)
3176{
3177 return pci_dev_reset(dev, 1);
3178}
3179
8dd7f803 3180/**
8c1c699f
YZ
3181 * pci_reset_function - quiesce and reset a PCI device function
3182 * @dev: PCI device to reset
8dd7f803
SY
3183 *
3184 * Some devices allow an individual function to be reset without affecting
3185 * other functions in the same device. The PCI device must be responsive
3186 * to PCI config space in order to use this function.
3187 *
3188 * This function does not just reset the PCI portion of a device, but
3189 * clears all the state associated with the device. This function differs
8c1c699f 3190 * from __pci_reset_function in that it saves and restores device state
8dd7f803
SY
3191 * over the reset.
3192 *
8c1c699f 3193 * Returns 0 if the device function was successfully reset or negative if the
8dd7f803
SY
3194 * device doesn't support resetting a single function.
3195 */
3196int pci_reset_function(struct pci_dev *dev)
3197{
8c1c699f 3198 int rc;
8dd7f803 3199
8c1c699f
YZ
3200 rc = pci_dev_reset(dev, 1);
3201 if (rc)
3202 return rc;
8dd7f803 3203
8dd7f803
SY
3204 pci_save_state(dev);
3205
8c1c699f
YZ
3206 /*
3207 * both INTx and MSI are disabled after the Interrupt Disable bit
3208 * is set and the Bus Master bit is cleared.
3209 */
8dd7f803
SY
3210 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3211
8c1c699f 3212 rc = pci_dev_reset(dev, 0);
8dd7f803
SY
3213
3214 pci_restore_state(dev);
8dd7f803 3215
8c1c699f 3216 return rc;
8dd7f803
SY
3217}
3218EXPORT_SYMBOL_GPL(pci_reset_function);
3219
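/*
 * Illustrative sketch (editorial addition): resetting a quiesced function
 * before handing it to a new user, and backing off gracefully when no
 * per-function reset method is available.
 */
static void __maybe_unused foo_reset_before_reuse(struct pci_dev *pdev)
{
        if (pci_probe_reset_function(pdev))
                return;         /* no usable reset method */

        if (pci_reset_function(pdev))
                dev_warn(&pdev->dev, "function reset failed\n");
}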
d556ad4b
PO
3220/**
3221 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3222 * @dev: PCI device to query
3223 *
3224 * Returns mmrbc: maximum designed memory read count in bytes
3225 * or appropriate error value.
3226 */
3227int pcix_get_max_mmrbc(struct pci_dev *dev)
3228{
7c9e2b1c 3229 int cap;
d556ad4b
PO
3230 u32 stat;
3231
3232 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3233 if (!cap)
3234 return -EINVAL;
3235
7c9e2b1c 3236 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
d556ad4b
PO
3237 return -EINVAL;
3238
25daeb55 3239 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
d556ad4b
PO
3240}
3241EXPORT_SYMBOL(pcix_get_max_mmrbc);
3242
3243/**
3244 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3245 * @dev: PCI device to query
3246 *
3247 * Returns mmrbc: maximum memory read count in bytes
3248 * or appropriate error value.
3249 */
3250int pcix_get_mmrbc(struct pci_dev *dev)
3251{
7c9e2b1c 3252 int cap;
bdc2bda7 3253 u16 cmd;
d556ad4b
PO
3254
3255 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3256 if (!cap)
3257 return -EINVAL;
3258
7c9e2b1c
DN
3259 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3260 return -EINVAL;
d556ad4b 3261
7c9e2b1c 3262 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
d556ad4b
PO
3263}
3264EXPORT_SYMBOL(pcix_get_mmrbc);
3265
3266/**
3267 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3268 * @dev: PCI device to query
3269 * @mmrbc: maximum memory read count in bytes
3270 * valid values are 512, 1024, 2048, 4096
3271 *
 3272 * If possible, sets the maximum memory read byte count; some bridges have errata
3273 * that prevent this.
3274 */
3275int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3276{
7c9e2b1c 3277 int cap;
bdc2bda7
DN
3278 u32 stat, v, o;
3279 u16 cmd;
d556ad4b 3280
229f5afd 3281 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
7c9e2b1c 3282 return -EINVAL;
d556ad4b
PO
3283
3284 v = ffs(mmrbc) - 10;
3285
3286 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3287 if (!cap)
7c9e2b1c 3288 return -EINVAL;
d556ad4b 3289
7c9e2b1c
DN
3290 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3291 return -EINVAL;
d556ad4b
PO
3292
3293 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3294 return -E2BIG;
3295
7c9e2b1c
DN
3296 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3297 return -EINVAL;
d556ad4b
PO
3298
3299 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3300 if (o != v) {
3301 if (v > o && dev->bus &&
3302 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3303 return -EIO;
3304
3305 cmd &= ~PCI_X_CMD_MAX_READ;
3306 cmd |= v << 2;
7c9e2b1c
DN
3307 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3308 return -EIO;
d556ad4b 3309 }
7c9e2b1c 3310 return 0;
d556ad4b
PO
3311}
3312EXPORT_SYMBOL(pcix_set_mmrbc);
3313
3314/**
3315 * pcie_get_readrq - get PCI Express read request size
3316 * @dev: PCI device to query
3317 *
3318 * Returns maximum memory read request in bytes
3319 * or appropriate error value.
3320 */
3321int pcie_get_readrq(struct pci_dev *dev)
3322{
3323 int ret, cap;
3324 u16 ctl;
3325
06a1cbaf 3326 cap = pci_pcie_cap(dev);
d556ad4b
PO
3327 if (!cap)
3328 return -EINVAL;
3329
3330 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3331 if (!ret)
93e75fab 3332 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
d556ad4b
PO
3333
3334 return ret;
3335}
3336EXPORT_SYMBOL(pcie_get_readrq);
3337
3338/**
3339 * pcie_set_readrq - set PCI Express maximum memory read request
3340 * @dev: PCI device to query
42e61f4a 3341 * @rq: maximum memory read count in bytes
d556ad4b
PO
3342 * valid values are 128, 256, 512, 1024, 2048, 4096
3343 *
c9b378c7 3344 * If possible sets maximum memory read request in bytes
d556ad4b
PO
3345 */
3346int pcie_set_readrq(struct pci_dev *dev, int rq)
3347{
3348 int cap, err = -EINVAL;
3349 u16 ctl, v;
3350
229f5afd 3351 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
d556ad4b
PO
3352 goto out;
3353
06a1cbaf 3354 cap = pci_pcie_cap(dev);
d556ad4b
PO
3355 if (!cap)
3356 goto out;
3357
3358 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3359 if (err)
3360 goto out;
a1c473aa
BH
3361 /*
3362 * If using the "performance" PCIe config, we clamp the
3363 * read rq size to the max packet size to prevent the
3364 * host bridge generating requests larger than we can
3365 * cope with
3366 */
3367 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3368 int mps = pcie_get_mps(dev);
3369
3370 if (mps < 0)
3371 return mps;
3372 if (mps < rq)
3373 rq = mps;
3374 }
3375
3376 v = (ffs(rq) - 8) << 12;
d556ad4b
PO
3377
3378 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3379 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3380 ctl |= v;
c9b378c7 3381 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
d556ad4b
PO
3382 }
3383
3384out:
3385 return err;
3386}
3387EXPORT_SYMBOL(pcie_set_readrq);
3388
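/*
 * Illustrative sketch (editorial addition): some drivers clamp the read
 * request size for hardware that performs poorly with large MRRS values;
 * the 512-byte figure is a made-up example, not a recommendation.
 */
static void __maybe_unused foo_clamp_readrq(struct pci_dev *pdev)
{
        if (pcie_get_readrq(pdev) > 512)
                pcie_set_readrq(pdev, 512);
}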
b03e7495
JM
3389/**
3390 * pcie_get_mps - get PCI Express maximum payload size
3391 * @dev: PCI device to query
3392 *
3393 * Returns maximum payload size in bytes
3394 * or appropriate error value.
3395 */
3396int pcie_get_mps(struct pci_dev *dev)
3397{
3398 int ret, cap;
3399 u16 ctl;
3400
3401 cap = pci_pcie_cap(dev);
3402 if (!cap)
3403 return -EINVAL;
3404
3405 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3406 if (!ret)
3407 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3408
3409 return ret;
3410}
3411
3412/**
3413 * pcie_set_mps - set PCI Express maximum payload size
3414 * @dev: PCI device to query
47c08f31 3415 * @mps: maximum payload size in bytes
b03e7495
JM
3416 * valid values are 128, 256, 512, 1024, 2048, 4096
3417 *
3418 * If possible sets maximum payload size
3419 */
3420int pcie_set_mps(struct pci_dev *dev, int mps)
3421{
3422 int cap, err = -EINVAL;
3423 u16 ctl, v;
3424
3425 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3426 goto out;
3427
3428 v = ffs(mps) - 8;
3429 if (v > dev->pcie_mpss)
3430 goto out;
3431 v <<= 5;
3432
3433 cap = pci_pcie_cap(dev);
3434 if (!cap)
3435 goto out;
3436
3437 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3438 if (err)
3439 goto out;
3440
3441 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3442 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3443 ctl |= v;
3444 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3445 }
3446out:
3447 return err;
3448}
3449
c87deff7
HS
3450/**
3451 * pci_select_bars - Make BAR mask from the type of resource
f95d882d 3452 * @dev: the PCI device for which BAR mask is made
c87deff7
HS
3453 * @flags: resource type mask to be selected
3454 *
 3455 * This helper routine makes a BAR mask from the type of resource.
3456 */
3457int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3458{
3459 int i, bars = 0;
3460 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3461 if (pci_resource_flags(dev, i) & flags)
3462 bars |= (1 << i);
3463 return bars;
3464}
3465
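/*
 * Illustrative sketch (editorial addition): requesting only the memory
 * BARs of a device by combining pci_select_bars() with
 * pci_request_selected_regions().  The "foo" name is an assumption.
 */
static int __maybe_unused foo_request_mem_bars(struct pci_dev *pdev)
{
        int bars = pci_select_bars(pdev, IORESOURCE_MEM);

        return pci_request_selected_regions(pdev, bars, "foo");
}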
613e7ed6
YZ
3466/**
3467 * pci_resource_bar - get position of the BAR associated with a resource
3468 * @dev: the PCI device
3469 * @resno: the resource number
3470 * @type: the BAR type to be filled in
3471 *
3472 * Returns BAR position in config space, or 0 if the BAR is invalid.
3473 */
3474int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3475{
d1b054da
YZ
3476 int reg;
3477
613e7ed6
YZ
3478 if (resno < PCI_ROM_RESOURCE) {
3479 *type = pci_bar_unknown;
3480 return PCI_BASE_ADDRESS_0 + 4 * resno;
3481 } else if (resno == PCI_ROM_RESOURCE) {
3482 *type = pci_bar_mem32;
3483 return dev->rom_base_reg;
d1b054da
YZ
3484 } else if (resno < PCI_BRIDGE_RESOURCES) {
3485 /* device specific resource */
3486 reg = pci_iov_resource_bar(dev, resno, type);
3487 if (reg)
3488 return reg;
613e7ed6
YZ
3489 }
3490
865df576 3491 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
613e7ed6
YZ
3492 return 0;
3493}
3494
95a8b6ef
MT
3495/* Some architectures require additional programming to enable VGA */
3496static arch_set_vga_state_t arch_set_vga_state;
3497
3498void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3499{
3500 arch_set_vga_state = func; /* NULL disables */
3501}
3502
3503static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
7ad35cf2 3504 unsigned int command_bits, u32 flags)
95a8b6ef
MT
3505{
3506 if (arch_set_vga_state)
3507 return arch_set_vga_state(dev, decode, command_bits,
7ad35cf2 3508 flags);
95a8b6ef
MT
3509 return 0;
3510}
3511
deb2d2ec
BH
3512/**
3513 * pci_set_vga_state - set VGA decode state on device and parents if requested
19eea630
RD
3514 * @dev: the PCI device
3515 * @decode: true = enable decoding, false = disable decoding
3516 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3f37d622 3517 * @flags: %PCI_VGA_STATE_CHANGE_DECODES and/or %PCI_VGA_STATE_CHANGE_BRIDGE,
3448a19d 3518 * selecting whether the device's decodes and/or its ancestor bridges are changed
deb2d2ec
BH
3519 */
3520int pci_set_vga_state(struct pci_dev *dev, bool decode,
3448a19d 3521 unsigned int command_bits, u32 flags)
deb2d2ec
BH
3522{
3523 struct pci_bus *bus;
3524 struct pci_dev *bridge;
3525 u16 cmd;
95a8b6ef 3526 int rc;
deb2d2ec 3527
3448a19d 3528 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
deb2d2ec 3529
95a8b6ef 3530 /* ARCH specific VGA enables */
3448a19d 3531 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
95a8b6ef
MT
3532 if (rc)
3533 return rc;
3534
3448a19d
DA
3535 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3536 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3537 if (decode == true)
3538 cmd |= command_bits;
3539 else
3540 cmd &= ~command_bits;
3541 pci_write_config_word(dev, PCI_COMMAND, cmd);
3542 }
deb2d2ec 3543
3448a19d 3544 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
deb2d2ec
BH
3545 return 0;
3546
3547 bus = dev->bus;
3548 while (bus) {
3549 bridge = bus->self;
3550 if (bridge) {
3551 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3552 &cmd);
3553 if (decode == true)
3554 cmd |= PCI_BRIDGE_CTL_VGA;
3555 else
3556 cmd &= ~PCI_BRIDGE_CTL_VGA;
3557 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3558 cmd);
3559 }
3560 bus = bus->parent;
3561 }
3562 return 0;
3563}
3564
32a9a682
YS
3565#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3566static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
e9d1e492 3567static DEFINE_SPINLOCK(resource_alignment_lock);
32a9a682
YS
3568
/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to check
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1) {
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}

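/*
 * Illustrative sketch only, not part of this file: with the kernel booted
 * with "pci=resource_alignment=20@0000:01:00.0", the parser above returns
 * a 1 MiB (1 << 20) alignment for domain 0000, bus 01, slot 00, func 0,
 * and PAGE_SIZE when no "<order>@" prefix is given. The helper below is a
 * made-up caller used only to show the return value.
 */
static void example_report_alignment(struct pci_dev *dev)
{
	resource_size_t align = pci_specified_resource_alignment(dev);

	if (align)
		dev_info(&dev->dev, "user requested alignment 0x%llx\n",
			 (unsigned long long)align);
}
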
/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero if the device is a target for resource reassignment,
 *          zero otherwise.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	return (pci_specified_resource_alignment(dev) != 0);
}

ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}

ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;
	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}

static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
					pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);

static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
	return 1;
}

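/*
 * Illustrative sketch only, not part of this file: an architecture can
 * provide its own (non-weak) pci_ext_cfg_avail() to refuse extended
 * config space access when the firmware did not set up MMCONFIG (or an
 * equivalent mechanism) for this root bridge. arch_has_mmconfig() is a
 * made-up stand-in for that arch-specific check.
 */
static bool arch_has_mmconfig(struct pci_dev *dev)
{
	return true;	/* placeholder for a real arch-specific test */
}

int pci_ext_cfg_avail(struct pci_dev *dev)
{
	/* offsets above 0xff are only reachable via MMCONFIG or similar */
	return arch_has_mmconfig(dev) ? 1 : 0;
}
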
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

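/*
 * Illustrative sketch only, not part of this file: a platform that needs
 * CardBus-specific fixups can supply a non-weak pci_fixup_cardbus(),
 * overriding the empty default above. The "fixup" applied here is made up
 * and only logs the devices found behind the CardBus bridge.
 */
void pci_fixup_cardbus(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list)
		dev_info(&dev->dev, "applying platform CardBus fixup\n");
}
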
static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc();
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
					strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);

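/*
 * Example (illustrative only): the options above are consumed from the
 * kernel command line as a single comma-separated "pci=" argument, e.g.
 *
 *	pci=nomsi,cbmemsize=32M,hpiosize=4K,pcie_bus_safe,resource_alignment=12@0000:00:1f.2
 *
 * pci_setup() splits the string on commas and handles each token in turn;
 * memparse() accepts the usual K/M/G suffixes for the size options, and
 * any token that is not recognized is reported with the
 * "PCI: Unknown option" message.
 */
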
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);