/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh. Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI is initialized.
 * Basically, we consider PCI not initialized when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
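
/*
 * Illustrative sizing example (hypothetical values): a 1 MB 32-bit memory
 * BAR reads back 0xfff00000 after the all-ones write, so with mask
 * 0xfffffff0 the significant bits are 0xfff00000; isolating the lowest set
 * bit gives 0x00100000 and pci_size() returns 0x000fffff, i.e. the BAR
 * decodes 1 MB.
 */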

static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}
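
/*
 * For reference (illustrative): in the low bits of a memory BAR, bit 0 is
 * the I/O-space flag, bits 2:1 encode the type (00 = 32-bit, 01 = below
 * 1M, 10 = 64-bit) and bit 3 marks it prefetchable, so a raw BAR value of
 * 0x0000000c decodes to a prefetchable 64-bit memory resource.
 */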

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
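
/*
 * Worked example (hypothetical register values): with the default 4K
 * granularity, io_base_lo = 0x21 and io_limit_lo = 0x31 decode to
 * base = 0x2000 and limit = 0x3000, so the bridge forwards the I/O
 * window [0x2000-0x3fff].
 */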

static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
			IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}

static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->bus = b;
	return bridge;
}

static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

 out:
	return agp_speeds[index];
}
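
/*
 * Illustrative mapping (hypothetical status values): for an AGP 2.0 bridge
 * (agp3 == 0), agpstat bit 2 selects AGP_4X; for an AGP 3.0 bridge
 * (agp3 != 0) the same bits map two entries further, so agpstat = 0x2
 * yields AGP_8X.
 */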

static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}

static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}

static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);

static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/*
	 * Make sure the bridge is powered on to be able to access config
	 * space of devices below it.
	 */
	pm_runtime_get_sync(&dev->dev);

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

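	/*
	 * Layout of the PCI_PRIMARY_BUS dword (illustrative): bits 7:0 are
	 * the primary bus, 15:8 the secondary bus, 23:16 the subordinate
	 * bus and 31:24 the secondary latency timer, so a value of
	 * 0x00030201 means primary 01, secondary 02, subordinate 03.
	 */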
	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1, 0xff);
		}
		max++;
		buses = (buses & 0xff000000)
			| ((unsigned int)(child->primary) << 0)
			| ((unsigned int)(child->busn_res.start) << 8)
			| ((unsigned int)(child->busn_res.end) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	pm_runtime_put(&dev->dev);

	return max;
}
EXPORT_SYMBOL(pci_scan_bridge);

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	int type;
	struct pci_dev *parent;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

	/*
	 * A Root Port is always the upstream end of a Link.  No PCIe
	 * component has two Links.  Two Links are connected by a Switch
	 * that has a Port on each Link and internal logic to connect the
	 * two Ports.
	 */
	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT)
		pdev->has_secondary_link = 1;
	else if (type == PCI_EXP_TYPE_UPSTREAM ||
		 type == PCI_EXP_TYPE_DOWNSTREAM) {
		parent = pci_upstream_bridge(pdev);

		/*
		 * Usually there's an upstream device (Root Port or Switch
		 * Downstream Port), but we can't assume one exists.
		 */
		if (parent && !parent->has_secondary_link)
			pdev->has_secondary_link = 1;
	}
}
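
/*
 * For reference (illustrative value): the PCIe Capabilities register read
 * into pcie_flags_reg encodes the device/port type in bits 7:4, so a raw
 * value of 0x0042 describes a capability-version-2 Root Port.
 */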

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

/**
 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transactions and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		return PCI_CFG_SPACE_SIZE;
	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
		return PCI_CFG_SPACE_SIZE;

	return PCI_CFG_SPACE_EXP_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (pci_is_pcie(dev))
		return pci_cfg_space_size_ext(dev);

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return PCI_CFG_SPACE_SIZE;

	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
		return pci_cfg_space_size_ext(dev);

	return PCI_CFG_SPACE_SIZE;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		/* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	if (dev->non_compliant_bars) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	switch (dev->hdr_type) {		/* header type */
	case PCI_HEADER_TYPE_NORMAL:		/* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code.  Legacy mode ATA controllers have fixed
		 * addresses.  These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		/* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		/* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				/* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static void pci_configure_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = pci_upstream_bridge(dev);
	int mps, p_mps, rc;

	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps == p_mps)
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
		return;
	}

	/*
	 * Fancier MPS configuration is done later by
	 * pcie_bus_configure_settings()
	 */
	if (pcie_bus_config != PCIE_BUS_DEFAULT)
		return;

	rc = pcie_set_mps(dev, p_mps);
	if (rc) {
		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 p_mps);
		return;
	}

	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
		 p_mps, mps, 128 << dev->pcie_mpss);
}
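
/*
 * Note (illustrative): pcie_mpss is the encoded Max_Payload_Size Supported
 * field from Device Capabilities, so the device maximum is
 * 128 << pcie_mpss bytes -- e.g. pcie_mpss == 2 means 512 bytes.
 */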

static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp)
		hpp = &pci_default_type0;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev,
			 "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
{
	if (hpp)
		dev_warn(&dev->dev, "PCI-X settings not supported\n");
}

static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				   PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev))
		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}

static void pci_configure_device(struct pci_dev *dev)
{
	struct hotplug_params hpp;
	int ret;

	pci_configure_mps(dev);

	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
	if (ret)
		return;

	program_hpp_type2(dev, hpp.t2);
	program_hpp_type1(dev, hpp.t1);
	program_hpp_type0(dev, hpp.t0);
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev->driver_override);
	kfree(pci_dev->dma_alias_mask);
	kfree(pci_dev);
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);
	dev->dev.type = &pci_dev_type;
	dev->bus = pci_bus_get(bus);

	return dev;
}
EXPORT_SYMBOL(pci_alloc_dev);

bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/*
	 * Configuration Request Retry Status.  Some root ports return the
	 * actual device ID instead of the synthetic ID (0xFFFF) required
	 * by the PCIe spec.  Ignore the device ID and only check for
	 * (vendor id == 1).
	 */
	while ((*l & 0xffff) == 0x0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
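
/*
 * Illustrative timing: the CRS retry loop above starts with a 1 ms sleep
 * and doubles it on every retry (1, 2, 4, ... ms), so with the 60*1000 ms
 * timeout used by pci_scan_device() a stuck device is given up on after
 * roughly a minute.
 */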
1608
1609/*
1610 * Read the config data for a PCI device, sanity-check it
1611 * and fill in the dev structure...
1612 */
1613static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1614{
1615 struct pci_dev *dev;
1616 u32 l;
1617
1618 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1619 return NULL;
1620
8b1fce04 1621 dev = pci_alloc_dev(bus);
1da177e4
LT
1622 if (!dev)
1623 return NULL;
1624
1da177e4 1625 dev->devfn = devfn;
1da177e4
LT
1626 dev->vendor = l & 0xffff;
1627 dev->device = (l >> 16) & 0xffff;
cef354db 1628
98d9f30c
BH
1629 pci_set_of_node(dev);
1630
480b93b7 1631 if (pci_setup_device(dev)) {
8b1fce04 1632 pci_bus_put(dev->bus);
1da177e4
LT
1633 kfree(dev);
1634 return NULL;
1635 }
1da177e4
LT
1636
1637 return dev;
1638}
1639
201de56e
ZY
1640static void pci_init_capabilities(struct pci_dev *dev)
1641{
938174e5
SS
1642 /* Enhanced Allocation */
1643 pci_ea_init(dev);
1644
e80e7edc
GP
1645 /* Setup MSI caps & disable MSI/MSI-X interrupts */
1646 pci_msi_setup_pci_dev(dev);
201de56e 1647
63f4898a
RW
1648 /* Buffers for saving PCIe and PCI-X capabilities */
1649 pci_allocate_cap_save_buffers(dev);
1650
201de56e
ZY
1651 /* Power Management */
1652 pci_pm_init(dev);
1653
1654 /* Vital Product Data */
f1cd93f9 1655 pci_vpd_init(dev);
58c3a727
YZ
1656
1657 /* Alternative Routing-ID Forwarding */
31ab2476 1658 pci_configure_ari(dev);
d1b054da
YZ
1659
1660 /* Single Root I/O Virtualization */
1661 pci_iov_init(dev);
ae21ee65 1662
edc90fee
BH
1663 /* Address Translation Services */
1664 pci_ats_init(dev);
1665
ae21ee65 1666 /* Enable ACS P2P upstream forwarding */
5d990b62 1667 pci_enable_acs(dev);
b07461a8
TI
1668
1669 pci_cleanup_aer_error_status_regs(dev);
9bb04a0c
JY
1670
1671 /* Precision Time Measurement */
1672 pci_ptm_init(dev);
201de56e
ZY
1673}
1674
098259eb
MZ
1675/*
1676 * This is the equivalent of pci_host_bridge_msi_domain that acts on
1677 * devices. Firmware interfaces that can select the MSI domain on a
1678 * per-device basis should be called from here.
1679 */
1680static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
1681{
1682 struct irq_domain *d;
1683
1684 /*
1685 * If a domain has been set through the pcibios_add_device
1686 * callback, then this is the one (platform code knows best).
1687 */
1688 d = dev_get_msi_domain(&dev->dev);
1689 if (d)
1690 return d;
1691
54fa97ee
MZ
1692 /*
1693 * Let's see if we have a firmware interface able to provide
1694 * the domain.
1695 */
1696 d = pci_msi_get_device_domain(dev);
1697 if (d)
1698 return d;
1699
098259eb
MZ
1700 return NULL;
1701}

static void pci_set_msi_domain(struct pci_dev *dev)
{
	struct irq_domain *d;

	/*
	 * If the platform or firmware interfaces cannot supply a
	 * device-specific MSI domain, then inherit the default domain
	 * from the host bridge itself.
	 */
	d = pci_dev_msi_domain(dev);
	if (!d)
		d = dev_get_msi_domain(&dev->bus->dev);

	dev_set_msi_domain(&dev->dev, d);
}

/**
 * pci_dma_configure - Setup DMA configuration
 * @dev: ptr to pci_dev struct of the PCI device
 *
 * Function to update a PCI device's DMA configuration using the info
 * from the OF node or ACPI node of the host bridge's parent (if any).
 */
static void pci_dma_configure(struct pci_dev *dev)
{
	struct device *bridge = pci_get_host_bridge_device(dev);

	if (IS_ENABLED(CONFIG_OF) &&
	    bridge->parent && bridge->parent->of_node) {
		of_dma_configure(&dev->dev, bridge->parent->of_node);
	} else if (has_acpi_companion(bridge)) {
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
		enum dev_dma_attr attr = acpi_get_dma_attr(adev);

		if (attr == DEV_DMA_NOT_SUPPORTED)
			dev_warn(&dev->dev, "DMA not supported.\n");
		else
			arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
					   attr == DEV_DMA_COHERENT);
	}

	pci_put_host_bridge_device(bridge);
}

void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;
	pci_dma_configure(dev);

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Setup MSI irq domain */
	pci_set_msi_domain(dev);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}

struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);
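
/*
 * Illustrative sketch, not part of probe.c: roughly how hotplug code can
 * bring up a single function that has just appeared.  The helper name is
 * made up for the example; the PCI core calls used here are the real ones.
 */
static struct pci_dev *example_hotplug_add_fn(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return NULL;	/* nothing answered at this devfn */

	pci_assign_unassigned_bus_resources(bus);	/* give it BARs/windows */
	pci_bus_add_device(dev);			/* sysfs + driver binding */
	return dev;
}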

static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
	int pos;
	u16 cap = 0;
	unsigned next_fn;

	if (pci_ari_enabled(bus)) {
		if (!dev)
			return 0;
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
		if (!pos)
			return 0;

		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
		next_fn = PCI_ARI_CAP_NFN(cap);
		if (next_fn <= fn)
			return 0;	/* protect against malformed list */

		return next_fn;
	}

	/* dev may be NULL for non-contiguous multifunction devices */
	if (!dev || dev->multifunction)
		return (fn + 1) % 8;

	return 0;
}
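
/*
 * Illustrative note, not part of probe.c: with ARI enabled the traditional
 * 5-bit device / 3-bit function split goes away and a single device can
 * expose up to 256 functions, so next_fn() follows the hardware's Next
 * Function Number chain instead of simply stepping fn from 0 to 7.
 */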

static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *parent = bus->self;

	if (!parent || !pci_is_pcie(parent))
		return 0;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
		return 1;

	/*
	 * PCIe downstream ports are bridges that normally lead to only a
	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
	 * possible devices, not just device 0.  See PCIe spec r3.0,
	 * sec 7.3.1.
	 */
	if (parent->has_secondary_link &&
	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 1;
	return 0;
}
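
/*
 * Illustrative note, not part of probe.c: devfn packs the 5-bit device
 * (slot) number and the 3-bit function number, i.e. devfn = (slot << 3) | fn.
 * PCI_DEVFN(), PCI_SLOT() and PCI_FUNC() from <linux/pci.h> perform this
 * packing and unpacking; the helper name below is made up for the example.
 */
static inline unsigned int example_devfn(unsigned int slot, unsigned int fn)
{
	return PCI_DEVFN(slot, fn);	/* e.g. slot 3, fn 1 -> 0x19 */
}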

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* Only one slot has a PCIe device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}
EXPORT_SYMBOL(pci_scan_slot);
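
/*
 * Illustrative sketch, not part of probe.c: rescanning one slot and then
 * making any new devices usable.  The helper name and the caller-supplied
 * devfn are assumptions for the example; the core calls are the real ones.
 */
static int example_rescan_slot(struct pci_bus *bus, int devfn)
{
	int nr;

	pci_lock_rescan_remove();
	nr = pci_scan_slot(bus, devfn);
	if (nr) {
		pci_assign_unassigned_bus_resources(bus);
		pci_bus_add_devices(bus);
	}
	pci_unlock_rescan_remove();

	return nr;	/* number of new devices found */
}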

static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}

static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}
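
/*
 * Encoding note (illustrative, not part of probe.c): pcie_mpss and the MPS
 * value used above are log2 encodings relative to 128 bytes, so the byte
 * size is 128 << code: 0 -> 128 B, 1 -> 256 B, 2 -> 512 B, ..., 5 -> 4096 B.
 * The helper name is made up for the example.
 */
static inline int example_mps_to_bytes(u8 code)
{
	return 128 << code;
}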

static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
}

static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
	    pcie_bus_config == PCIE_BUS_DEFAULT)
		return 0;

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}

/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
 * parents then children fashion.  If this changes, then this code will not
 * work as designed.
 */
void pcie_bus_configure_settings(struct pci_bus *bus)
{
	u8 smpss = 0;

	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		smpss = bus->self->pcie_mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
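
/*
 * Illustrative sketch, not part of probe.c: host bridge drivers typically
 * apply the MPS/MRRS policy once per bus after scanning, walking the child
 * buses of the root.  The helper name is made up for the example.
 */
static void example_configure_buses(struct pci_bus *root)
{
	struct pci_bus *child;

	list_for_each_entry(child, &root->children, node)
		pcie_bus_configure_settings(child);
}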

unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * Make sure a hotplug bridge has at least the minimum requested
	 * number of buses.
	 */
	if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
		if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
			max = bus->busn_res.start + pci_hotplug_bus_size - 1;
	}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);

/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
 * @bridge: Host bridge to set up.
 *
 * Default empty implementation.  Replace with an architecture-specific setup
 * routine, if necessary.
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;
}

void __weak pcibios_add_bus(struct pci_bus *bus)
{
}

void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}

struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct resource_entry *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus(NULL);
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	b->domain_nr = pci_bus_find_domain_nr(b, parent);
#endif
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		put_device(&bridge->dev);
		goto err_out;
	}
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);
	pci_set_bus_msi_domain(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, resources) {
		list_move_tail(&window->node, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
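
/*
 * Illustrative sketch, not part of probe.c: a minimal root bus bring-up as
 * a host bridge driver might do it.  The helper name and the caller-supplied
 * bus range, MMIO window and CPU-to-bus offset are assumptions for the
 * example; pci_add_resource_offset() and the other core calls are real.
 */
static struct pci_bus *example_create_root(struct device *parent,
					   struct pci_ops *ops, void *sysdata,
					   struct resource *busn,
					   struct resource *mmio,
					   resource_size_t mmio_offset)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, busn);	/* IORESOURCE_BUS window */
	pci_add_resource_offset(&resources, mmio, mmio_offset);

	b = pci_create_root_bus(parent, busn->start, ops, sysdata, &resources);
	if (!b) {
		pci_free_resource_list(&resources);
		return NULL;
	}

	pci_scan_child_bus(b);
	pci_bus_add_devices(b);
	return b;
}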

int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = request_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);

	return conflict == NULL;
}

int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR end %s updated to %02x\n",
		   &old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}

void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR %s released\n",
		   res, ret ? "can not be" : "is");
}

struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata,
		struct list_head *resources, struct msi_controller *msi)
{
	struct resource_entry *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	resource_list_for_each_entry(window, resources)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	b->msi = msi;

	if (!found) {
		dev_info(&b->dev,
			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			 bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	if (!found)
		pci_bus_update_busn_res_end(b, max);

	return b;
}

struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
				     NULL);
}
EXPORT_SYMBOL(pci_scan_root_bus);

struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
					void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
	if (b) {
		pci_scan_child_bus(b);
	} else {
		pci_free_resource_list(&resources);
	}
	return b;
}
EXPORT_SYMBOL(pci_scan_bus);

/**
 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge mmio/io resources if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the maximum subordinate bus number discovered.
 */
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
	unsigned int max;
	struct pci_bus *bus = bridge->subordinate;

	max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bridge_resources(bridge);

	pci_bus_add_devices(bus);

	return max;
}

/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them.
 *
 * Returns the maximum subordinate bus number discovered.
 */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);

/*
 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
 * routines should always be executed under this mutex.
 */
static DEFINE_MUTEX(pci_rescan_remove_lock);

void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);

void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
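
/*
 * Illustrative sketch, not part of probe.c: a full rescan of every root
 * bus under the rescan/remove lock, similar to what a sysfs "rescan"
 * handler does.  The helper name is made up; the calls are the real core
 * interfaces.
 */
static void example_rescan_everything(void)
{
	struct pci_bus *b = NULL;

	pci_lock_rescan_remove();
	while ((b = pci_find_next_bus(b)) != NULL)
		pci_rescan_bus(b);
	pci_unlock_rescan_remove();
}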

static int __init pci_sort_bf_cmp(const struct device *d_a,
				  const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}