PCI: Fix MPS peer-to-peer DMA comment syntax
drivers/pci/probe.c (deliverable/linux.git)
1 /*
2 * probe.c - PCI detection and setup code
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15
16 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR 3
18
19 struct resource busn_resource = {
20 .name = "PCI busn",
21 .start = 0,
22 .end = 255,
23 .flags = IORESOURCE_BUS,
24 };
25
26 /* Ugh. Need to stop exporting this to modules. */
27 LIST_HEAD(pci_root_buses);
28 EXPORT_SYMBOL(pci_root_buses);
29
30 static LIST_HEAD(pci_domain_busn_res_list);
31
32 struct pci_domain_busn_res {
33 struct list_head list;
34 struct resource res;
35 int domain_nr;
36 };
37
38 static struct resource *get_pci_domain_busn_res(int domain_nr)
39 {
40 struct pci_domain_busn_res *r;
41
42 list_for_each_entry(r, &pci_domain_busn_res_list, list)
43 if (r->domain_nr == domain_nr)
44 return &r->res;
45
46 r = kzalloc(sizeof(*r), GFP_KERNEL);
47 if (!r)
48 return NULL;
49
50 r->domain_nr = domain_nr;
51 r->res.start = 0;
52 r->res.end = 0xff;
53 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
54
55 list_add_tail(&r->list, &pci_domain_busn_res_list);
56
57 return &r->res;
58 }
59
60 static int find_anything(struct device *dev, void *data)
61 {
62 return 1;
63 }
64
65 /*
66 * Some device drivers need to know whether PCI has been initialized.
67 * Basically, we consider PCI uninitialized when there is no device
68 * to be found on the pci_bus_type.
69 */
70 int no_pci_devices(void)
71 {
72 struct device *dev;
73 int no_devices;
74
75 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
76 no_devices = (dev == NULL);
77 put_device(dev);
78 return no_devices;
79 }
80 EXPORT_SYMBOL(no_pci_devices);
81
82 /*
83 * PCI Bus Class
84 */
85 static void release_pcibus_dev(struct device *dev)
86 {
87 struct pci_bus *pci_bus = to_pci_bus(dev);
88
89 if (pci_bus->bridge)
90 put_device(pci_bus->bridge);
91 pci_bus_remove_resources(pci_bus);
92 pci_release_bus_of_node(pci_bus);
93 kfree(pci_bus);
94 }
95
96 static struct class pcibus_class = {
97 .name = "pci_bus",
98 .dev_release = &release_pcibus_dev,
99 .dev_attrs = pcibus_dev_attrs,
100 };
101
102 static int __init pcibus_class_init(void)
103 {
104 return class_register(&pcibus_class);
105 }
106 postcore_initcall(pcibus_class_init);
107
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 u64 size = mask & maxbase; /* Find the significant bits */
111 if (!size)
112 return 0;
113
114 /* Get the lowest of them to find the decode size, and
115 from that the extent. */
116 size = (size & ~(size-1)) - 1;
117
118 /* base == maxbase can be valid only if the BAR has
119 already been programmed with all 1s. */
120 if (base == maxbase && ((base | size) & mask) != mask)
121 return 0;
122
123 return size;
124 }
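/*
 * Worked example with hypothetical values (not taken from real hardware):
 * for a 1 MB, 32-bit memory BAR, writing all 1s and reading back might
 * yield maxbase = 0xfff00000 with mask = PCI_BASE_ADDRESS_MEM_MASK.
 * Then size = 0xfff00000, the lowest set bit gives size & ~(size-1) =
 * 0x00100000, and pci_size() returns 0x000fffff, i.e. an extent of
 * 1 MB - 1 that the caller adds to the base to get the resource end.
 */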
125
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 u32 mem_type;
129 unsigned long flags;
130
131 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 flags |= IORESOURCE_IO;
134 return flags;
135 }
136
137 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 flags |= IORESOURCE_MEM;
139 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 flags |= IORESOURCE_PREFETCH;
141
142 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 switch (mem_type) {
144 case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 break;
146 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 /* 1M mem BAR treated as 32-bit BAR */
148 break;
149 case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 flags |= IORESOURCE_MEM_64;
151 break;
152 default:
153 /* mem unknown type treated as 32-bit BAR */
154 break;
155 }
156 return flags;
157 }
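/*
 * Illustrative decode (hypothetical BAR value): 0xf000000c has bit 0
 * clear (memory space), bit 3 set (prefetchable) and type bits 2:1 equal
 * to 10b, so decode_bar() returns IORESOURCE_MEM | IORESOURCE_PREFETCH |
 * IORESOURCE_MEM_64 for a 64-bit prefetchable BAR whose low half of the
 * bus address is 0xf0000000.
 */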
158
159 /**
160 * pci_read_base - read a PCI BAR
161 * @dev: the PCI device
162 * @type: type of the BAR
163 * @res: resource buffer to be filled in
164 * @pos: BAR position in the config space
165 *
166 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
167 */
168 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
169 struct resource *res, unsigned int pos)
170 {
171 u32 l, sz, mask;
172 u16 orig_cmd;
173 struct pci_bus_region region, inverted_region;
174 bool bar_too_big = false, bar_disabled = false;
175
176 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
177
178 /* No printks while decoding is disabled! */
179 if (!dev->mmio_always_on) {
180 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
181 pci_write_config_word(dev, PCI_COMMAND,
182 orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
183 }
184
185 res->name = pci_name(dev);
186
187 pci_read_config_dword(dev, pos, &l);
188 pci_write_config_dword(dev, pos, l | mask);
189 pci_read_config_dword(dev, pos, &sz);
190 pci_write_config_dword(dev, pos, l);
191
192 /*
193 * All bits set in sz means the device isn't working properly.
194 * If the BAR isn't implemented, all bits must be 0. If it's a
195 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
196 * 1 must be clear.
197 */
198 if (!sz || sz == 0xffffffff)
199 goto fail;
200
201 /*
202 * I don't know how l can have all bits set. Copied from old code.
203 * Maybe it fixes a bug on some ancient platform.
204 */
205 if (l == 0xffffffff)
206 l = 0;
207
208 if (type == pci_bar_unknown) {
209 res->flags = decode_bar(dev, l);
210 res->flags |= IORESOURCE_SIZEALIGN;
211 if (res->flags & IORESOURCE_IO) {
212 l &= PCI_BASE_ADDRESS_IO_MASK;
213 mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
214 } else {
215 l &= PCI_BASE_ADDRESS_MEM_MASK;
216 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
217 }
218 } else {
219 res->flags |= (l & IORESOURCE_ROM_ENABLE);
220 l &= PCI_ROM_ADDRESS_MASK;
221 mask = (u32)PCI_ROM_ADDRESS_MASK;
222 }
223
224 if (res->flags & IORESOURCE_MEM_64) {
225 u64 l64 = l;
226 u64 sz64 = sz;
227 u64 mask64 = mask | (u64)~0 << 32;
228
229 pci_read_config_dword(dev, pos + 4, &l);
230 pci_write_config_dword(dev, pos + 4, ~0);
231 pci_read_config_dword(dev, pos + 4, &sz);
232 pci_write_config_dword(dev, pos + 4, l);
233
234 l64 |= ((u64)l << 32);
235 sz64 |= ((u64)sz << 32);
236
237 sz64 = pci_size(l64, sz64, mask64);
238
239 if (!sz64)
240 goto fail;
241
242 if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
243 bar_too_big = true;
244 goto fail;
245 }
246
247 if ((sizeof(resource_size_t) < 8) && l) {
248 /* Address above 32-bit boundary; disable the BAR */
249 pci_write_config_dword(dev, pos, 0);
250 pci_write_config_dword(dev, pos + 4, 0);
251 region.start = 0;
252 region.end = sz64;
253 bar_disabled = true;
254 } else {
255 region.start = l64;
256 region.end = l64 + sz64;
257 }
258 } else {
259 sz = pci_size(l, sz, mask);
260
261 if (!sz)
262 goto fail;
263
264 region.start = l;
265 region.end = l + sz;
266 }
267
268 pcibios_bus_to_resource(dev, res, &region);
269 pcibios_resource_to_bus(dev, &inverted_region, res);
270
271 /*
272 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
273 * the corresponding resource address (the physical address used by
274 * the CPU). Converting that resource address back to a bus address
275 * should yield the original BAR value:
276 *
277 * resource_to_bus(bus_to_resource(A)) == A
278 *
279 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
280 * be claimed by the device.
281 */
282 if (inverted_region.start != region.start) {
283 dev_info(&dev->dev, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n",
284 pos, &region.start);
285 res->flags |= IORESOURCE_UNSET;
286 res->end -= res->start;
287 res->start = 0;
288 }
289
290 goto out;
291
292
293 fail:
294 res->flags = 0;
295 out:
296 if (!dev->mmio_always_on)
297 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
298
299 if (bar_too_big)
300 dev_err(&dev->dev, "reg 0x%x: can't handle 64-bit BAR\n", pos);
301 if (res->flags && !bar_disabled)
302 dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
303
304 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
305 }
306
307 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
308 {
309 unsigned int pos, reg;
310
311 for (pos = 0; pos < howmany; pos++) {
312 struct resource *res = &dev->resource[pos];
313 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
314 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
315 }
316
317 if (rom) {
318 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
319 dev->rom_base_reg = rom;
320 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
321 IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
322 IORESOURCE_SIZEALIGN;
323 __pci_read_base(dev, pci_bar_mem32, res, rom);
324 }
325 }
326
327 static void pci_read_bridge_io(struct pci_bus *child)
328 {
329 struct pci_dev *dev = child->self;
330 u8 io_base_lo, io_limit_lo;
331 unsigned long io_mask, io_granularity, base, limit;
332 struct pci_bus_region region;
333 struct resource *res;
334
335 io_mask = PCI_IO_RANGE_MASK;
336 io_granularity = 0x1000;
337 if (dev->io_window_1k) {
338 /* Support 1K I/O space granularity */
339 io_mask = PCI_IO_1K_RANGE_MASK;
340 io_granularity = 0x400;
341 }
342
343 res = child->resource[0];
344 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
345 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
346 base = (io_base_lo & io_mask) << 8;
347 limit = (io_limit_lo & io_mask) << 8;
348
349 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
350 u16 io_base_hi, io_limit_hi;
351
352 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
353 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
354 base |= ((unsigned long) io_base_hi << 16);
355 limit |= ((unsigned long) io_limit_hi << 16);
356 }
357
358 if (base <= limit) {
359 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
360 region.start = base;
361 region.end = limit + io_granularity - 1;
362 pcibios_bus_to_resource(dev, res, &region);
363 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
364 }
365 }
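/*
 * Example with hypothetical register values: with the default 4K
 * granularity, io_base_lo = 0x20 and io_limit_lo = 0x30 decode to
 * base = 0x2000 and limit = 0x3000, so the bridge forwards the I/O
 * window [0x2000, 0x3fff] (limit + io_granularity - 1).
 */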
366
367 static void pci_read_bridge_mmio(struct pci_bus *child)
368 {
369 struct pci_dev *dev = child->self;
370 u16 mem_base_lo, mem_limit_lo;
371 unsigned long base, limit;
372 struct pci_bus_region region;
373 struct resource *res;
374
375 res = child->resource[1];
376 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
377 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
378 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
379 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
380 if (base <= limit) {
381 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
382 region.start = base;
383 region.end = limit + 0xfffff;
384 pcibios_bus_to_resource(dev, res, &region);
385 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
386 }
387 }
388
389 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
390 {
391 struct pci_dev *dev = child->self;
392 u16 mem_base_lo, mem_limit_lo;
393 unsigned long base, limit;
394 struct pci_bus_region region;
395 struct resource *res;
396
397 res = child->resource[2];
398 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
399 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
400 base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
401 limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
402
403 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
404 u32 mem_base_hi, mem_limit_hi;
405
406 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
407 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
408
409 /*
410 * Some bridges set the base > limit by default, and some
411 * (broken) BIOSes do not initialize them. If we find
412 * this, just assume they are not being used.
413 */
414 if (mem_base_hi <= mem_limit_hi) {
415 #if BITS_PER_LONG == 64
416 base |= ((unsigned long) mem_base_hi) << 32;
417 limit |= ((unsigned long) mem_limit_hi) << 32;
418 #else
419 if (mem_base_hi || mem_limit_hi) {
420 dev_err(&dev->dev, "can't handle 64-bit "
421 "address space for bridge\n");
422 return;
423 }
424 #endif
425 }
426 }
427 if (base <= limit) {
428 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
429 IORESOURCE_MEM | IORESOURCE_PREFETCH;
430 if (res->flags & PCI_PREF_RANGE_TYPE_64)
431 res->flags |= IORESOURCE_MEM_64;
432 region.start = base;
433 region.end = limit + 0xfffff;
434 pcibios_bus_to_resource(dev, res, &region);
435 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
436 }
437 }
438
439 void pci_read_bridge_bases(struct pci_bus *child)
440 {
441 struct pci_dev *dev = child->self;
442 struct resource *res;
443 int i;
444
445 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
446 return;
447
448 dev_info(&dev->dev, "PCI bridge to %pR%s\n",
449 &child->busn_res,
450 dev->transparent ? " (subtractive decode)" : "");
451
452 pci_bus_remove_resources(child);
453 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
454 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
455
456 pci_read_bridge_io(child);
457 pci_read_bridge_mmio(child);
458 pci_read_bridge_mmio_pref(child);
459
460 if (dev->transparent) {
461 pci_bus_for_each_resource(child->parent, res, i) {
462 if (res) {
463 pci_bus_add_resource(child, res,
464 PCI_SUBTRACTIVE_DECODE);
465 dev_printk(KERN_DEBUG, &dev->dev,
466 " bridge window %pR (subtractive decode)\n",
467 res);
468 }
469 }
470 }
471 }
472
473 static struct pci_bus *pci_alloc_bus(void)
474 {
475 struct pci_bus *b;
476
477 b = kzalloc(sizeof(*b), GFP_KERNEL);
478 if (!b)
479 return NULL;
480
481 INIT_LIST_HEAD(&b->node);
482 INIT_LIST_HEAD(&b->children);
483 INIT_LIST_HEAD(&b->devices);
484 INIT_LIST_HEAD(&b->slots);
485 INIT_LIST_HEAD(&b->resources);
486 b->max_bus_speed = PCI_SPEED_UNKNOWN;
487 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
488 return b;
489 }
490
491 static void pci_release_host_bridge_dev(struct device *dev)
492 {
493 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
494
495 if (bridge->release_fn)
496 bridge->release_fn(bridge);
497
498 pci_free_resource_list(&bridge->windows);
499
500 kfree(bridge);
501 }
502
503 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
504 {
505 struct pci_host_bridge *bridge;
506
507 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
508 if (!bridge)
509 return NULL;
510
511 INIT_LIST_HEAD(&bridge->windows);
512 bridge->bus = b;
513 return bridge;
514 }
515
516 static unsigned char pcix_bus_speed[] = {
517 PCI_SPEED_UNKNOWN, /* 0 */
518 PCI_SPEED_66MHz_PCIX, /* 1 */
519 PCI_SPEED_100MHz_PCIX, /* 2 */
520 PCI_SPEED_133MHz_PCIX, /* 3 */
521 PCI_SPEED_UNKNOWN, /* 4 */
522 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
523 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
524 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
525 PCI_SPEED_UNKNOWN, /* 8 */
526 PCI_SPEED_66MHz_PCIX_266, /* 9 */
527 PCI_SPEED_100MHz_PCIX_266, /* A */
528 PCI_SPEED_133MHz_PCIX_266, /* B */
529 PCI_SPEED_UNKNOWN, /* C */
530 PCI_SPEED_66MHz_PCIX_533, /* D */
531 PCI_SPEED_100MHz_PCIX_533, /* E */
532 PCI_SPEED_133MHz_PCIX_533 /* F */
533 };
534
535 static unsigned char pcie_link_speed[] = {
536 PCI_SPEED_UNKNOWN, /* 0 */
537 PCIE_SPEED_2_5GT, /* 1 */
538 PCIE_SPEED_5_0GT, /* 2 */
539 PCIE_SPEED_8_0GT, /* 3 */
540 PCI_SPEED_UNKNOWN, /* 4 */
541 PCI_SPEED_UNKNOWN, /* 5 */
542 PCI_SPEED_UNKNOWN, /* 6 */
543 PCI_SPEED_UNKNOWN, /* 7 */
544 PCI_SPEED_UNKNOWN, /* 8 */
545 PCI_SPEED_UNKNOWN, /* 9 */
546 PCI_SPEED_UNKNOWN, /* A */
547 PCI_SPEED_UNKNOWN, /* B */
548 PCI_SPEED_UNKNOWN, /* C */
549 PCI_SPEED_UNKNOWN, /* D */
550 PCI_SPEED_UNKNOWN, /* E */
551 PCI_SPEED_UNKNOWN /* F */
552 };
553
554 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
555 {
556 bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
557 }
558 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
559
560 static unsigned char agp_speeds[] = {
561 AGP_UNKNOWN,
562 AGP_1X,
563 AGP_2X,
564 AGP_4X,
565 AGP_8X
566 };
567
568 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
569 {
570 int index = 0;
571
572 if (agpstat & 4)
573 index = 3;
574 else if (agpstat & 2)
575 index = 2;
576 else if (agpstat & 1)
577 index = 1;
578 else
579 goto out;
580
581 if (agp3) {
582 index += 2;
583 if (index == 5)
584 index = 0;
585 }
586
587 out:
588 return agp_speeds[index];
589 }
590
591
592 static void pci_set_bus_speed(struct pci_bus *bus)
593 {
594 struct pci_dev *bridge = bus->self;
595 int pos;
596
597 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
598 if (!pos)
599 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
600 if (pos) {
601 u32 agpstat, agpcmd;
602
603 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
604 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
605
606 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
607 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
608 }
609
610 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
611 if (pos) {
612 u16 status;
613 enum pci_bus_speed max;
614
615 pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
616 &status);
617
618 if (status & PCI_X_SSTATUS_533MHZ) {
619 max = PCI_SPEED_133MHz_PCIX_533;
620 } else if (status & PCI_X_SSTATUS_266MHZ) {
621 max = PCI_SPEED_133MHz_PCIX_266;
622 } else if (status & PCI_X_SSTATUS_133MHZ) {
623 if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
624 max = PCI_SPEED_133MHz_PCIX_ECC;
625 } else {
626 max = PCI_SPEED_133MHz_PCIX;
627 }
628 } else {
629 max = PCI_SPEED_66MHz_PCIX;
630 }
631
632 bus->max_bus_speed = max;
633 bus->cur_bus_speed = pcix_bus_speed[
634 (status & PCI_X_SSTATUS_FREQ) >> 6];
635
636 return;
637 }
638
639 pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
640 if (pos) {
641 u32 linkcap;
642 u16 linksta;
643
644 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
645 bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
646
647 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
648 pcie_update_link_speed(bus, linksta);
649 }
650 }
651
652
653 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
654 struct pci_dev *bridge, int busnr)
655 {
656 struct pci_bus *child;
657 int i;
658 int ret;
659
660 /*
661 * Allocate a new bus, and inherit stuff from the parent..
662 */
663 child = pci_alloc_bus();
664 if (!child)
665 return NULL;
666
667 child->parent = parent;
668 child->ops = parent->ops;
669 child->sysdata = parent->sysdata;
670 child->bus_flags = parent->bus_flags;
671
672 /* initialize some portions of the bus device, but don't register it
673 * now as the parent is not properly set up yet.
674 */
675 child->dev.class = &pcibus_class;
676 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
677
678 /*
679 * Set up the primary, secondary and subordinate
680 * bus numbers.
681 */
682 child->number = child->busn_res.start = busnr;
683 child->primary = parent->busn_res.start;
684 child->busn_res.end = 0xff;
685
686 if (!bridge) {
687 child->dev.parent = parent->bridge;
688 goto add_dev;
689 }
690
691 child->self = bridge;
692 child->bridge = get_device(&bridge->dev);
693 child->dev.parent = child->bridge;
694 pci_set_bus_of_node(child);
695 pci_set_bus_speed(child);
696
697 /* Set up default resource pointers and names.. */
698 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
699 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
700 child->resource[i]->name = child->name;
701 }
702 bridge->subordinate = child;
703
704 add_dev:
705 ret = device_register(&child->dev);
706 WARN_ON(ret < 0);
707
708 pcibios_add_bus(child);
709
710 /* Create legacy_io and legacy_mem files for this bus */
711 pci_create_legacy_files(child);
712
713 return child;
714 }
715
716 struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
717 {
718 struct pci_bus *child;
719
720 child = pci_alloc_child_bus(parent, dev, busnr);
721 if (child) {
722 down_write(&pci_bus_sem);
723 list_add_tail(&child->node, &parent->children);
724 up_write(&pci_bus_sem);
725 }
726 return child;
727 }
728
729 static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
730 {
731 struct pci_bus *parent = child->parent;
732
733 /* Attempts to fix that up are really dangerous unless
734 we're going to re-assign all bus numbers. */
735 if (!pcibios_assign_all_busses())
736 return;
737
738 while (parent->parent && parent->busn_res.end < max) {
739 parent->busn_res.end = max;
740 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
741 parent = parent->parent;
742 }
743 }
744
745 /*
746 * If it's a bridge, configure it and scan the bus behind it.
747 * For CardBus bridges, we don't scan behind as the devices will
748 * be handled by the bridge driver itself.
749 *
750 * We need to process bridges in two passes -- first we scan those
751 * already configured by the BIOS and after we are done with all of
752 * them, we proceed to assigning numbers to the remaining buses in
753 * order to avoid overlaps between old and new bus numbers.
754 */
755 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
756 {
757 struct pci_bus *child;
758 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
759 u32 buses, i, j = 0;
760 u16 bctl;
761 u8 primary, secondary, subordinate;
762 int broken = 0;
763
764 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
765 primary = buses & 0xFF;
766 secondary = (buses >> 8) & 0xFF;
767 subordinate = (buses >> 16) & 0xFF;
768
769 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
770 secondary, subordinate, pass);
771
772 if (!primary && (primary != bus->number) && secondary && subordinate) {
773 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
774 primary = bus->number;
775 }
776
777 /* Check if setup is sensible at all */
778 if (!pass &&
779 (primary != bus->number || secondary <= bus->number ||
780 secondary > subordinate)) {
781 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
782 secondary, subordinate);
783 broken = 1;
784 }
785
786 /* Disable MasterAbortMode during probing to avoid reporting
787 bus errors (on some architectures). */
788 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
789 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
790 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
791
792 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
793 !is_cardbus && !broken) {
794 unsigned int cmax;
795 /*
796 * Bus already configured by firmware, process it in the first
797 * pass and just note the configuration.
798 */
799 if (pass)
800 goto out;
801
802 /*
803 * If we already got to this bus through a different bridge,
804 * don't re-add it. This can happen with the i450NX chipset.
805 *
806 * However, we continue to descend down the hierarchy and
807 * scan remaining child buses.
808 */
809 child = pci_find_bus(pci_domain_nr(bus), secondary);
810 if (!child) {
811 child = pci_add_new_bus(bus, dev, secondary);
812 if (!child)
813 goto out;
814 child->primary = primary;
815 pci_bus_insert_busn_res(child, secondary, subordinate);
816 child->bridge_ctl = bctl;
817 }
818
819 cmax = pci_scan_child_bus(child);
820 if (cmax > max)
821 max = cmax;
822 if (child->busn_res.end > max)
823 max = child->busn_res.end;
824 } else {
825 /*
826 * We need to assign a number to this bus which we always
827 * do in the second pass.
828 */
829 if (!pass) {
830 if (pcibios_assign_all_busses() || broken)
831 /* Temporarily disable forwarding of the
832 configuration cycles on all bridges in
833 this bus segment to avoid possible
834 conflicts in the second pass between two
835 bridges programmed with overlapping
836 bus ranges. */
837 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
838 buses & ~0xffffff);
839 goto out;
840 }
841
842 /* Clear errors */
843 pci_write_config_word(dev, PCI_STATUS, 0xffff);
844
845 /* Prevent assigning a bus number that already exists.
846 * This can happen when a bridge is hot-plugged, so in
847 * this case we only re-scan this bus. */
848 child = pci_find_bus(pci_domain_nr(bus), max+1);
849 if (!child) {
850 child = pci_add_new_bus(bus, dev, ++max);
851 if (!child)
852 goto out;
853 pci_bus_insert_busn_res(child, max, 0xff);
854 }
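/*
 * PCI_PRIMARY_BUS dword layout: primary bus number in bits 7:0,
 * secondary in bits 15:8, subordinate in bits 23:16, and the secondary
 * latency timer in bits 31:24 (which is why the existing value is
 * preserved with the 0xff000000 mask below).
 */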
855 buses = (buses & 0xff000000)
856 | ((unsigned int)(child->primary) << 0)
857 | ((unsigned int)(child->busn_res.start) << 8)
858 | ((unsigned int)(child->busn_res.end) << 16);
859
860 /*
861 * yenta.c forces a secondary latency timer of 176.
862 * Copy that behaviour here.
863 */
864 if (is_cardbus) {
865 buses &= ~0xff000000;
866 buses |= CARDBUS_LATENCY_TIMER << 24;
867 }
868
869 /*
870 * We need to blast all three values with a single write.
871 */
872 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
873
874 if (!is_cardbus) {
875 child->bridge_ctl = bctl;
876 /*
877 * Adjust subordinate busnr in parent buses.
878 * We do this before scanning for children because
879 * some devices may not be detected if the bios
880 * was lazy.
881 */
882 pci_fixup_parent_subordinate_busnr(child, max);
883 /* Now we can scan all subordinate buses... */
884 max = pci_scan_child_bus(child);
885 /*
886 * now fix it up again since we have found
887 * the real value of max.
888 */
889 pci_fixup_parent_subordinate_busnr(child, max);
890 } else {
891 /*
892 * For CardBus bridges, we leave 4 bus numbers
893 * as cards with a PCI-to-PCI bridge can be
894 * inserted later.
895 */
896 for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
897 struct pci_bus *parent = bus;
898 if (pci_find_bus(pci_domain_nr(bus),
899 max+i+1))
900 break;
901 while (parent->parent) {
902 if ((!pcibios_assign_all_busses()) &&
903 (parent->busn_res.end > max) &&
904 (parent->busn_res.end <= max+i)) {
905 j = 1;
906 }
907 parent = parent->parent;
908 }
909 if (j) {
910 /*
911 * Often, there are two cardbus bridges
912 * -- try to leave one valid bus number
913 * for each one.
914 */
915 i /= 2;
916 break;
917 }
918 }
919 max += i;
920 pci_fixup_parent_subordinate_busnr(child, max);
921 }
922 /*
923 * Set the subordinate bus number to its real value.
924 */
925 pci_bus_update_busn_res_end(child, max);
926 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
927 }
928
929 sprintf(child->name,
930 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
931 pci_domain_nr(bus), child->number);
932
933 /* Has only triggered on CardBus, fixup is in yenta_socket */
934 while (bus->parent) {
935 if ((child->busn_res.end > bus->busn_res.end) ||
936 (child->number > bus->busn_res.end) ||
937 (child->number < bus->number) ||
938 (child->busn_res.end < bus->number)) {
939 dev_info(&child->dev, "%pR %s "
940 "hidden behind%s bridge %s %pR\n",
941 &child->busn_res,
942 (bus->number > child->busn_res.end &&
943 bus->busn_res.end < child->number) ?
944 "wholly" : "partially",
945 bus->self->transparent ? " transparent" : "",
946 dev_name(&bus->dev),
947 &bus->busn_res);
948 }
949 bus = bus->parent;
950 }
951
952 out:
953 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
954
955 return max;
956 }
957
958 /*
959 * Read interrupt line and base address registers.
960 * The architecture-dependent code can tweak these, of course.
961 */
962 static void pci_read_irq(struct pci_dev *dev)
963 {
964 unsigned char irq;
965
966 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
967 dev->pin = irq;
968 if (irq)
969 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
970 dev->irq = irq;
971 }
972
973 void set_pcie_port_type(struct pci_dev *pdev)
974 {
975 int pos;
976 u16 reg16;
977
978 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
979 if (!pos)
980 return;
981 pdev->is_pcie = 1;
982 pdev->pcie_cap = pos;
983 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
984 pdev->pcie_flags_reg = reg16;
985 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
986 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
987 }
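/*
 * Note: pcie_mpss holds the raw Max_Payload_Size Supported encoding from
 * the Device Capabilities register, so the supported payload in bytes is
 * 128 << pcie_mpss (e.g. an encoding of 2 means 512 bytes);
 * pcie_write_mps() below relies on this encoding.
 */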
988
989 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
990 {
991 u32 reg32;
992
993 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
994 if (reg32 & PCI_EXP_SLTCAP_HPC)
995 pdev->is_hotplug_bridge = 1;
996 }
997
998 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
999
1000 /**
1001 * pci_setup_device - fill in class and map information of a device
1002 * @dev: the device structure to fill
1003 *
1004 * Initialize the device structure with information about the device's
1005 * vendor, class, memory and IO-space addresses, IRQ lines, etc.
1006 * Called at initialisation of the PCI subsystem and by CardBus services.
1007 * Returns 0 on success and negative if unknown type of device (not normal,
1008 * bridge or CardBus).
1009 */
1010 int pci_setup_device(struct pci_dev *dev)
1011 {
1012 u32 class;
1013 u8 hdr_type;
1014 struct pci_slot *slot;
1015 int pos = 0;
1016 struct pci_bus_region region;
1017 struct resource *res;
1018
1019 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1020 return -EIO;
1021
1022 dev->sysdata = dev->bus->sysdata;
1023 dev->dev.parent = dev->bus->bridge;
1024 dev->dev.bus = &pci_bus_type;
1025 dev->hdr_type = hdr_type & 0x7f;
1026 dev->multifunction = !!(hdr_type & 0x80);
1027 dev->error_state = pci_channel_io_normal;
1028 set_pcie_port_type(dev);
1029
1030 list_for_each_entry(slot, &dev->bus->slots, list)
1031 if (PCI_SLOT(dev->devfn) == slot->number)
1032 dev->slot = slot;
1033
1034 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1035 set this higher, assuming the system even supports it. */
1036 dev->dma_mask = 0xffffffff;
1037
1038 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1039 dev->bus->number, PCI_SLOT(dev->devfn),
1040 PCI_FUNC(dev->devfn));
1041
1042 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1043 dev->revision = class & 0xff;
1044 dev->class = class >> 8; /* upper 3 bytes */
1045
1046 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1047 dev->vendor, dev->device, dev->hdr_type, dev->class);
1048
1049 /* need to have dev->class ready */
1050 dev->cfg_size = pci_cfg_space_size(dev);
1051
1052 /* "Unknown power state" */
1053 dev->current_state = PCI_UNKNOWN;
1054
1055 /* Early fixups, before probing the BARs */
1056 pci_fixup_device(pci_fixup_early, dev);
1057 /* device class may be changed after fixup */
1058 class = dev->class >> 8;
1059
1060 switch (dev->hdr_type) { /* header type */
1061 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1062 if (class == PCI_CLASS_BRIDGE_PCI)
1063 goto bad;
1064 pci_read_irq(dev);
1065 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1066 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1067 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1068
1069 /*
1070 * Do the ugly legacy mode stuff here rather than broken chip
1071 * quirk code. Legacy mode ATA controllers have fixed
1072 * addresses. These are not always echoed in BAR0-3, and
1073 * BAR0-3 in a few cases contain junk!
1074 */
1075 if (class == PCI_CLASS_STORAGE_IDE) {
1076 u8 progif;
1077 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1078 if ((progif & 1) == 0) {
1079 region.start = 0x1F0;
1080 region.end = 0x1F7;
1081 res = &dev->resource[0];
1082 res->flags = LEGACY_IO_RESOURCE;
1083 pcibios_bus_to_resource(dev, res, &region);
1084 region.start = 0x3F6;
1085 region.end = 0x3F6;
1086 res = &dev->resource[1];
1087 res->flags = LEGACY_IO_RESOURCE;
1088 pcibios_bus_to_resource(dev, res, &region);
1089 }
1090 if ((progif & 4) == 0) {
1091 region.start = 0x170;
1092 region.end = 0x177;
1093 res = &dev->resource[2];
1094 res->flags = LEGACY_IO_RESOURCE;
1095 pcibios_bus_to_resource(dev, res, &region);
1096 region.start = 0x376;
1097 region.end = 0x376;
1098 res = &dev->resource[3];
1099 res->flags = LEGACY_IO_RESOURCE;
1100 pcibios_bus_to_resource(dev, res, &region);
1101 }
1102 }
1103 break;
1104
1105 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1106 if (class != PCI_CLASS_BRIDGE_PCI)
1107 goto bad;
1108 /* The PCI-to-PCI bridge spec requires that a subtractive
1109 decoding (i.e. transparent) bridge have a programming
1110 interface code of 0x01. */
1111 pci_read_irq(dev);
1112 dev->transparent = ((dev->class & 0xff) == 1);
1113 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1114 set_pcie_hotplug_bridge(dev);
1115 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1116 if (pos) {
1117 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1118 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1119 }
1120 break;
1121
1122 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1123 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1124 goto bad;
1125 pci_read_irq(dev);
1126 pci_read_bases(dev, 1, 0);
1127 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1128 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1129 break;
1130
1131 default: /* unknown header */
1132 dev_err(&dev->dev, "unknown header type %02x, "
1133 "ignoring device\n", dev->hdr_type);
1134 return -EIO;
1135
1136 bad:
1137 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
1138 "type %02x)\n", dev->class, dev->hdr_type);
1139 dev->class = PCI_CLASS_NOT_DEFINED;
1140 }
1141
1142 /* We found a fine healthy device, go go go... */
1143 return 0;
1144 }
1145
1146 static void pci_release_capabilities(struct pci_dev *dev)
1147 {
1148 pci_vpd_release(dev);
1149 pci_iov_release(dev);
1150 pci_free_cap_save_buffers(dev);
1151 }
1152
1153 /**
1154 * pci_release_dev - free a pci device structure when all users of it are finished.
1155 * @dev: device that's been disconnected
1156 *
1157 * Will be called only by the device core when all users of this pci device are
1158 * done.
1159 */
1160 static void pci_release_dev(struct device *dev)
1161 {
1162 struct pci_dev *pci_dev;
1163
1164 pci_dev = to_pci_dev(dev);
1165 pci_release_capabilities(pci_dev);
1166 pci_release_of_node(pci_dev);
1167 pcibios_release_device(pci_dev);
1168 pci_bus_put(pci_dev->bus);
1169 kfree(pci_dev);
1170 }
1171
1172 /**
1173 * pci_cfg_space_size - get the configuration space size of the PCI device.
1174 * @dev: PCI device
1175 *
1176 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1177 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1178 * access it. Maybe we don't have a way to generate extended config space
1179 * accesses, or the device is behind a reverse Express bridge. So we try
1180 * reading the dword at 0x100 which must either be 0 or a valid extended
1181 * capability header.
1182 */
1183 int pci_cfg_space_size_ext(struct pci_dev *dev)
1184 {
1185 u32 status;
1186 int pos = PCI_CFG_SPACE_SIZE;
1187
1188 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1189 goto fail;
1190 if (status == 0xffffffff)
1191 goto fail;
1192
1193 return PCI_CFG_SPACE_EXP_SIZE;
1194
1195 fail:
1196 return PCI_CFG_SPACE_SIZE;
1197 }
1198
1199 int pci_cfg_space_size(struct pci_dev *dev)
1200 {
1201 int pos;
1202 u32 status;
1203 u16 class;
1204
1205 class = dev->class >> 8;
1206 if (class == PCI_CLASS_BRIDGE_HOST)
1207 return pci_cfg_space_size_ext(dev);
1208
1209 if (!pci_is_pcie(dev)) {
1210 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1211 if (!pos)
1212 goto fail;
1213
1214 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1215 if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1216 goto fail;
1217 }
1218
1219 return pci_cfg_space_size_ext(dev);
1220
1221 fail:
1222 return PCI_CFG_SPACE_SIZE;
1223 }
1224
1225 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1226 {
1227 struct pci_dev *dev;
1228
1229 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1230 if (!dev)
1231 return NULL;
1232
1233 INIT_LIST_HEAD(&dev->bus_list);
1234 dev->dev.type = &pci_dev_type;
1235 dev->bus = pci_bus_get(bus);
1236
1237 return dev;
1238 }
1239 EXPORT_SYMBOL(pci_alloc_dev);
1240
1241 struct pci_dev *alloc_pci_dev(void)
1242 {
1243 return pci_alloc_dev(NULL);
1244 }
1245 EXPORT_SYMBOL(alloc_pci_dev);
1246
1247 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1248 int crs_timeout)
1249 {
1250 int delay = 1;
1251
1252 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1253 return false;
1254
1255 /* some broken boards return 0 or ~0 if a slot is empty: */
1256 if (*l == 0xffffffff || *l == 0x00000000 ||
1257 *l == 0x0000ffff || *l == 0xffff0000)
1258 return false;
1259
1260 /* Configuration request Retry Status */
1261 while (*l == 0xffff0001) {
1262 if (!crs_timeout)
1263 return false;
1264
1265 msleep(delay);
1266 delay *= 2;
1267 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1268 return false;
1269 /* Card hasn't responded in 60 seconds? Must be stuck. */
1270 if (delay > crs_timeout) {
1271 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
1272 "responding\n", pci_domain_nr(bus),
1273 bus->number, PCI_SLOT(devfn),
1274 PCI_FUNC(devfn));
1275 return false;
1276 }
1277 }
1278
1279 return true;
1280 }
1281 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1282
1283 /*
1284 * Read the config data for a PCI device, sanity-check it
1285 * and fill in the dev structure...
1286 */
1287 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1288 {
1289 struct pci_dev *dev;
1290 u32 l;
1291
1292 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1293 return NULL;
1294
1295 dev = pci_alloc_dev(bus);
1296 if (!dev)
1297 return NULL;
1298
1299 dev->devfn = devfn;
1300 dev->vendor = l & 0xffff;
1301 dev->device = (l >> 16) & 0xffff;
1302
1303 pci_set_of_node(dev);
1304
1305 if (pci_setup_device(dev)) {
1306 pci_bus_put(dev->bus);
1307 kfree(dev);
1308 return NULL;
1309 }
1310
1311 return dev;
1312 }
1313
1314 static void pci_init_capabilities(struct pci_dev *dev)
1315 {
1316 /* MSI/MSI-X list */
1317 pci_msi_init_pci_dev(dev);
1318
1319 /* Buffers for saving PCIe and PCI-X capabilities */
1320 pci_allocate_cap_save_buffers(dev);
1321
1322 /* Power Management */
1323 pci_pm_init(dev);
1324
1325 /* Vital Product Data */
1326 pci_vpd_pci22_init(dev);
1327
1328 /* Alternative Routing-ID Forwarding */
1329 pci_configure_ari(dev);
1330
1331 /* Single Root I/O Virtualization */
1332 pci_iov_init(dev);
1333
1334 /* Enable ACS P2P upstream forwarding */
1335 pci_enable_acs(dev);
1336 }
1337
1338 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1339 {
1340 int ret;
1341
1342 device_initialize(&dev->dev);
1343 dev->dev.release = pci_release_dev;
1344
1345 set_dev_node(&dev->dev, pcibus_to_node(bus));
1346 dev->dev.dma_mask = &dev->dma_mask;
1347 dev->dev.dma_parms = &dev->dma_parms;
1348 dev->dev.coherent_dma_mask = 0xffffffffull;
1349
1350 pci_set_dma_max_seg_size(dev, 65536);
1351 pci_set_dma_seg_boundary(dev, 0xffffffff);
1352
1353 /* Fix up broken headers */
1354 pci_fixup_device(pci_fixup_header, dev);
1355
1356 /* moved out from quirk header fixup code */
1357 pci_reassigndev_resource_alignment(dev);
1358
1359 /* Clear the state_saved flag. */
1360 dev->state_saved = false;
1361
1362 /* Initialize various capabilities */
1363 pci_init_capabilities(dev);
1364
1365 /*
1366 * Add the device to our list of discovered devices
1367 * and the bus list for fixup functions, etc.
1368 */
1369 down_write(&pci_bus_sem);
1370 list_add_tail(&dev->bus_list, &bus->devices);
1371 up_write(&pci_bus_sem);
1372
1373 ret = pcibios_add_device(dev);
1374 WARN_ON(ret < 0);
1375
1376 /* Notifier could use PCI capabilities */
1377 dev->match_driver = false;
1378 ret = device_add(&dev->dev);
1379 WARN_ON(ret < 0);
1380
1381 pci_proc_attach_device(dev);
1382 }
1383
1384 struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1385 {
1386 struct pci_dev *dev;
1387
1388 dev = pci_get_slot(bus, devfn);
1389 if (dev) {
1390 pci_dev_put(dev);
1391 return dev;
1392 }
1393
1394 dev = pci_scan_device(bus, devfn);
1395 if (!dev)
1396 return NULL;
1397
1398 pci_device_add(dev, bus);
1399
1400 return dev;
1401 }
1402 EXPORT_SYMBOL(pci_scan_single_device);
1403
1404 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1405 {
1406 int pos;
1407 u16 cap = 0;
1408 unsigned next_fn;
1409
1410 if (pci_ari_enabled(bus)) {
1411 if (!dev)
1412 return 0;
1413 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1414 if (!pos)
1415 return 0;
1416
1417 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1418 next_fn = PCI_ARI_CAP_NFN(cap);
1419 if (next_fn <= fn)
1420 return 0; /* protect against malformed list */
1421
1422 return next_fn;
1423 }
1424
1425 /* dev may be NULL for non-contiguous multifunction devices */
1426 if (!dev || dev->multifunction)
1427 return (fn + 1) % 8;
1428
1429 return 0;
1430 }
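/*
 * With ARI enabled, function numbering is no longer limited to the
 * traditional 8 functions per slot: each function's ARI capability
 * carries a "next function number" field, and next_fn() follows that
 * chain until it reads 0 (or a non-increasing value, treated above as
 * a malformed list).
 */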
1431
1432 static int only_one_child(struct pci_bus *bus)
1433 {
1434 struct pci_dev *parent = bus->self;
1435
1436 if (!parent || !pci_is_pcie(parent))
1437 return 0;
1438 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1439 return 1;
1440 if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1441 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1442 return 1;
1443 return 0;
1444 }
1445
1446 /**
1447 * pci_scan_slot - scan a PCI slot on a bus for devices.
1448 * @bus: PCI bus to scan
1449 * @devfn: slot number to scan (must have zero function.)
1450 *
1451 * Scan a PCI slot on the specified PCI bus for devices, adding
1452 * discovered devices to the @bus->devices list. New devices
1453 * will not have is_added set.
1454 *
1455 * Returns the number of new devices found.
1456 */
1457 int pci_scan_slot(struct pci_bus *bus, int devfn)
1458 {
1459 unsigned fn, nr = 0;
1460 struct pci_dev *dev;
1461
1462 if (only_one_child(bus) && (devfn > 0))
1463 return 0; /* Already scanned the entire slot */
1464
1465 dev = pci_scan_single_device(bus, devfn);
1466 if (!dev)
1467 return 0;
1468 if (!dev->is_added)
1469 nr++;
1470
1471 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1472 dev = pci_scan_single_device(bus, devfn + fn);
1473 if (dev) {
1474 if (!dev->is_added)
1475 nr++;
1476 dev->multifunction = 1;
1477 }
1478 }
1479
1480 /* only one slot has pcie device */
1481 if (bus->self && nr)
1482 pcie_aspm_init_link_state(bus->self);
1483
1484 return nr;
1485 }
1486
1487 static int pcie_find_smpss(struct pci_dev *dev, void *data)
1488 {
1489 u8 *smpss = data;
1490
1491 if (!pci_is_pcie(dev))
1492 return 0;
1493
1494 /*
1495 * We don't have a way to change MPS settings on devices that have
1496 * drivers attached. A hot-added device might support only the minimum
1497 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
1498 * where devices may be hot-added, we limit the fabric MPS to 128 so
1499 * hot-added devices will work correctly.
1500 *
1501 * However, if we hot-add a device to a slot directly below a Root
1502 * Port, it's impossible for there to be other existing devices below
1503 * the port. We don't limit the MPS in this case because we can
1504 * reconfigure MPS on both the Root Port and the hot-added device,
1505 * and there are no other devices involved.
1506 *
1507 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
1508 */
1509 if (dev->is_hotplug_bridge &&
1510 pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
1511 *smpss = 0;
1512
1513 if (*smpss > dev->pcie_mpss)
1514 *smpss = dev->pcie_mpss;
1515
1516 return 0;
1517 }
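/*
 * The value accumulated in *smpss uses the same encoding as pcie_mpss,
 * i.e. a payload of 128 << smpss bytes, so forcing *smpss to 0 above
 * pins the fabric to the minimum 128-byte MPS for hot-plug safety.
 */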
1518
1519 static void pcie_write_mps(struct pci_dev *dev, int mps)
1520 {
1521 int rc;
1522
1523 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1524 mps = 128 << dev->pcie_mpss;
1525
1526 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1527 dev->bus->self)
1528 /* For "Performance", the assumption is made that
1529 * downstream communication will never be larger than
1530 * the MRRS. So, the MPS only needs to be configured
1531 * for the upstream communication. This being the case,
1532 * walk from the top down and set the MPS of the child
1533 * to that of the parent bus.
1534 *
1535 * Configure the device MPS with the smaller of the
1536 * device MPSS or the bridge MPS (which is assumed to be
1537 * properly configured at this point to the largest
1538 * allowable MPS based on its parent bus).
1539 */
1540 mps = min(mps, pcie_get_mps(dev->bus->self));
1541 }
1542
1543 rc = pcie_set_mps(dev, mps);
1544 if (rc)
1545 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1546 }
1547
1548 static void pcie_write_mrrs(struct pci_dev *dev)
1549 {
1550 int rc, mrrs;
1551
1552 /* In the "safe" case, do not configure the MRRS. There appear to be
1553 * issues with setting MRRS to 0 on a number of devices.
1554 */
1555 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1556 return;
1557
1558 /* For Max performance, the MRRS must be set to the largest supported
1559 * value. However, it cannot be configured larger than the MPS the
1560 * device or the bus can support. This should already be properly
1561 * configured by a prior call to pcie_write_mps.
1562 */
1563 mrrs = pcie_get_mps(dev);
1564
1565 /* MRRS is a R/W register. Invalid values can be written, but a
1566 * subsequent read will verify if the value is acceptable or not.
1567 * If the MRRS value provided is not acceptable (e.g., too large),
1568 * shrink the value until it is acceptable to the HW.
1569 */
1570 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1571 rc = pcie_set_readrq(dev, mrrs);
1572 if (!rc)
1573 break;
1574
1575 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1576 mrrs /= 2;
1577 }
1578
1579 if (mrrs < 128)
1580 dev_err(&dev->dev, "MRRS was unable to be configured with a "
1581 "safe value. If problems are experienced, try running "
1582 "with pci=pcie_bus_safe.\n");
1583 }
1584
1585 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1586 {
1587 int mps, orig_mps;
1588
1589 if (!pci_is_pcie(dev))
1590 return 0;
1591
1592 mps = 128 << *(u8 *)data;
1593 orig_mps = pcie_get_mps(dev);
1594
1595 pcie_write_mps(dev, mps);
1596 pcie_write_mrrs(dev);
1597
1598 dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), "
1599 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
1600 orig_mps, pcie_get_readrq(dev));
1601
1602 return 0;
1603 }
1604
1605 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1606 * parents then children fashion. If this changes, then this code will not
1607 * work as designed.
1608 */
1609 void pcie_bus_configure_settings(struct pci_bus *bus)
1610 {
1611 u8 smpss;
1612
1613 if (!bus->self)
1614 return;
1615
1616 if (!pci_is_pcie(bus->self))
1617 return;
1618
1619 if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
1620 return;
1621
1622 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
1623 * to be aware of the MPS of the destination. To work around this,
1624 * simply force the MPS of the entire system to the smallest possible.
1625 */
1626 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1627 smpss = 0;
1628
1629 if (pcie_bus_config == PCIE_BUS_SAFE) {
1630 smpss = bus->self->pcie_mpss;
1631
1632 pcie_find_smpss(bus->self, &smpss);
1633 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1634 }
1635
1636 pcie_bus_configure_set(bus->self, &smpss);
1637 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1638 }
1639 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1640
1641 unsigned int pci_scan_child_bus(struct pci_bus *bus)
1642 {
1643 unsigned int devfn, pass, max = bus->busn_res.start;
1644 struct pci_dev *dev;
1645
1646 dev_dbg(&bus->dev, "scanning bus\n");
1647
1648 /* Go find them, Rover! */
1649 for (devfn = 0; devfn < 0x100; devfn += 8)
1650 pci_scan_slot(bus, devfn);
1651
1652 /* Reserve buses for SR-IOV capability. */
1653 max += pci_iov_bus_range(bus);
1654
1655 /*
1656 * After performing arch-dependent fixup of the bus, look behind
1657 * all PCI-to-PCI bridges on this bus.
1658 */
1659 if (!bus->is_added) {
1660 dev_dbg(&bus->dev, "fixups for bus\n");
1661 pcibios_fixup_bus(bus);
1662 bus->is_added = 1;
1663 }
1664
1665 for (pass=0; pass < 2; pass++)
1666 list_for_each_entry(dev, &bus->devices, bus_list) {
1667 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1668 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1669 max = pci_scan_bridge(bus, dev, max, pass);
1670 }
1671
1672 /*
1673 * We've scanned the bus and so we know all about what's on
1674 * the other side of any bridges that may be on this bus plus
1675 * any devices.
1676 *
1677 * Return how far we've got finding sub-buses.
1678 */
1679 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1680 return max;
1681 }
1682
1683 /**
1684 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1685 * @bridge: Host bridge to set up.
1686 *
1687 * Default empty implementation. Replace with an architecture-specific setup
1688 * routine, if necessary.
1689 */
1690 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1691 {
1692 return 0;
1693 }
1694
1695 void __weak pcibios_add_bus(struct pci_bus *bus)
1696 {
1697 }
1698
1699 void __weak pcibios_remove_bus(struct pci_bus *bus)
1700 {
1701 }
1702
1703 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1704 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1705 {
1706 int error;
1707 struct pci_host_bridge *bridge;
1708 struct pci_bus *b, *b2;
1709 struct pci_host_bridge_window *window, *n;
1710 struct resource *res;
1711 resource_size_t offset;
1712 char bus_addr[64];
1713 char *fmt;
1714
1715 b = pci_alloc_bus();
1716 if (!b)
1717 return NULL;
1718
1719 b->sysdata = sysdata;
1720 b->ops = ops;
1721 b->number = b->busn_res.start = bus;
1722 b2 = pci_find_bus(pci_domain_nr(b), bus);
1723 if (b2) {
1724 /* If we already got to this bus through a different bridge, ignore it */
1725 dev_dbg(&b2->dev, "bus already known\n");
1726 goto err_out;
1727 }
1728
1729 bridge = pci_alloc_host_bridge(b);
1730 if (!bridge)
1731 goto err_out;
1732
1733 bridge->dev.parent = parent;
1734 bridge->dev.release = pci_release_host_bridge_dev;
1735 dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1736 error = pcibios_root_bridge_prepare(bridge);
1737 if (error) {
1738 kfree(bridge);
1739 goto err_out;
1740 }
1741
1742 error = device_register(&bridge->dev);
1743 if (error) {
1744 put_device(&bridge->dev);
1745 goto err_out;
1746 }
1747 b->bridge = get_device(&bridge->dev);
1748 device_enable_async_suspend(b->bridge);
1749 pci_set_bus_of_node(b);
1750
1751 if (!parent)
1752 set_dev_node(b->bridge, pcibus_to_node(b));
1753
1754 b->dev.class = &pcibus_class;
1755 b->dev.parent = b->bridge;
1756 dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1757 error = device_register(&b->dev);
1758 if (error)
1759 goto class_dev_reg_err;
1760
1761 pcibios_add_bus(b);
1762
1763 /* Create legacy_io and legacy_mem files for this bus */
1764 pci_create_legacy_files(b);
1765
1766 if (parent)
1767 dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
1768 else
1769 printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1770
1771 /* Add initial resources to the bus */
1772 list_for_each_entry_safe(window, n, resources, list) {
1773 list_move_tail(&window->list, &bridge->windows);
1774 res = window->res;
1775 offset = window->offset;
1776 if (res->flags & IORESOURCE_BUS)
1777 pci_bus_insert_busn_res(b, bus, res->end);
1778 else
1779 pci_bus_add_resource(b, res, 0);
1780 if (offset) {
1781 if (resource_type(res) == IORESOURCE_IO)
1782 fmt = " (bus address [%#06llx-%#06llx])";
1783 else
1784 fmt = " (bus address [%#010llx-%#010llx])";
1785 snprintf(bus_addr, sizeof(bus_addr), fmt,
1786 (unsigned long long) (res->start - offset),
1787 (unsigned long long) (res->end - offset));
1788 } else
1789 bus_addr[0] = '\0';
1790 dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
1791 }
1792
1793 down_write(&pci_bus_sem);
1794 list_add_tail(&b->node, &pci_root_buses);
1795 up_write(&pci_bus_sem);
1796
1797 return b;
1798
1799 class_dev_reg_err:
1800 put_device(&bridge->dev);
1801 device_unregister(&bridge->dev);
1802 err_out:
1803 kfree(b);
1804 return NULL;
1805 }
1806
1807 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
1808 {
1809 struct resource *res = &b->busn_res;
1810 struct resource *parent_res, *conflict;
1811
1812 res->start = bus;
1813 res->end = bus_max;
1814 res->flags = IORESOURCE_BUS;
1815
1816 if (!pci_is_root_bus(b))
1817 parent_res = &b->parent->busn_res;
1818 else {
1819 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
1820 res->flags |= IORESOURCE_PCI_FIXED;
1821 }
1822
1823 conflict = insert_resource_conflict(parent_res, res);
1824
1825 if (conflict)
1826 dev_printk(KERN_DEBUG, &b->dev,
1827 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
1828 res, pci_is_root_bus(b) ? "domain " : "",
1829 parent_res, conflict->name, conflict);
1830
1831 return conflict == NULL;
1832 }
1833
1834 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
1835 {
1836 struct resource *res = &b->busn_res;
1837 struct resource old_res = *res;
1838 resource_size_t size;
1839 int ret;
1840
1841 if (res->start > bus_max)
1842 return -EINVAL;
1843
1844 size = bus_max - res->start + 1;
1845 ret = adjust_resource(res, res->start, size);
1846 dev_printk(KERN_DEBUG, &b->dev,
1847 "busn_res: %pR end %s updated to %02x\n",
1848 &old_res, ret ? "can not be" : "is", bus_max);
1849
1850 if (!ret && !res->parent)
1851 pci_bus_insert_busn_res(b, res->start, res->end);
1852
1853 return ret;
1854 }
1855
1856 void pci_bus_release_busn_res(struct pci_bus *b)
1857 {
1858 struct resource *res = &b->busn_res;
1859 int ret;
1860
1861 if (!res->flags || !res->parent)
1862 return;
1863
1864 ret = release_resource(res);
1865 dev_printk(KERN_DEBUG, &b->dev,
1866 "busn_res: %pR %s released\n",
1867 res, ret ? "can not be" : "is");
1868 }
1869
1870 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1871 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1872 {
1873 struct pci_host_bridge_window *window;
1874 bool found = false;
1875 struct pci_bus *b;
1876 int max;
1877
1878 list_for_each_entry(window, resources, list)
1879 if (window->res->flags & IORESOURCE_BUS) {
1880 found = true;
1881 break;
1882 }
1883
1884 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
1885 if (!b)
1886 return NULL;
1887
1888 if (!found) {
1889 dev_info(&b->dev,
1890 "No busn resource found for root bus, will use [bus %02x-ff]\n",
1891 bus);
1892 pci_bus_insert_busn_res(b, bus, 255);
1893 }
1894
1895 max = pci_scan_child_bus(b);
1896
1897 if (!found)
1898 pci_bus_update_busn_res_end(b, max);
1899
1900 pci_bus_add_devices(b);
1901 return b;
1902 }
1903 EXPORT_SYMBOL(pci_scan_root_bus);
1904
1905 /* Deprecated; use pci_scan_root_bus() instead */
1906 struct pci_bus *pci_scan_bus_parented(struct device *parent,
1907 int bus, struct pci_ops *ops, void *sysdata)
1908 {
1909 LIST_HEAD(resources);
1910 struct pci_bus *b;
1911
1912 pci_add_resource(&resources, &ioport_resource);
1913 pci_add_resource(&resources, &iomem_resource);
1914 pci_add_resource(&resources, &busn_resource);
1915 b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
1916 if (b)
1917 pci_scan_child_bus(b);
1918 else
1919 pci_free_resource_list(&resources);
1920 return b;
1921 }
1922 EXPORT_SYMBOL(pci_scan_bus_parented);
1923
1924 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
1925 void *sysdata)
1926 {
1927 LIST_HEAD(resources);
1928 struct pci_bus *b;
1929
1930 pci_add_resource(&resources, &ioport_resource);
1931 pci_add_resource(&resources, &iomem_resource);
1932 pci_add_resource(&resources, &busn_resource);
1933 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1934 if (b) {
1935 pci_scan_child_bus(b);
1936 pci_bus_add_devices(b);
1937 } else {
1938 pci_free_resource_list(&resources);
1939 }
1940 return b;
1941 }
1942 EXPORT_SYMBOL(pci_scan_bus);
1943
1944 /**
1945 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1946 * @bridge: PCI bridge for the bus to scan
1947 *
1948 * Scan a PCI bus and child buses for new devices, add them,
1949 * and enable them, resizing bridge mmio/io resource if necessary
1950 * and possible. The caller must ensure the child devices are already
1951 * removed for resizing to occur.
1952 *
1953 * Returns the highest subordinate bus number discovered.
1954 */
1955 unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
1956 {
1957 unsigned int max;
1958 struct pci_bus *bus = bridge->subordinate;
1959
1960 max = pci_scan_child_bus(bus);
1961
1962 pci_assign_unassigned_bridge_resources(bridge);
1963
1964 pci_bus_add_devices(bus);
1965
1966 return max;
1967 }
1968
1969 /**
1970 * pci_rescan_bus - scan a PCI bus for devices.
1971 * @bus: PCI bus to scan
1972 *
1973 * Scan a PCI bus and child buses for new devices, add them,
1974 * and enable them.
1975 *
1976 * Returns the highest subordinate bus number discovered.
1977 */
1978 unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
1979 {
1980 unsigned int max;
1981
1982 max = pci_scan_child_bus(bus);
1983 pci_assign_unassigned_bus_resources(bus);
1984 pci_enable_bridges(bus);
1985 pci_bus_add_devices(bus);
1986
1987 return max;
1988 }
1989 EXPORT_SYMBOL_GPL(pci_rescan_bus);
1990
1991 EXPORT_SYMBOL(pci_add_new_bus);
1992 EXPORT_SYMBOL(pci_scan_slot);
1993 EXPORT_SYMBOL(pci_scan_bridge);
1994 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1995
1996 static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
1997 {
1998 const struct pci_dev *a = to_pci_dev(d_a);
1999 const struct pci_dev *b = to_pci_dev(d_b);
2000
2001 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2002 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2003
2004 if (a->bus->number < b->bus->number) return -1;
2005 else if (a->bus->number > b->bus->number) return 1;
2006
2007 if (a->devfn < b->devfn) return -1;
2008 else if (a->devfn > b->devfn) return 1;
2009
2010 return 0;
2011 }
2012
2013 void __init pci_sort_breadthfirst(void)
2014 {
2015 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
2016 }