arch/powerpc/sysdev/ppc4xx_pci.c
1/*
2 * PCI / PCI-X / PCI-Express support for 4xx parts
3 *
4 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
5 *
6 * Most PCI Express code is coming from Stefan Roese implementation for
7 * arch/ppc in the Denx tree, slightly reworked by me.
8 *
9 * Copyright 2007 DENX Software Engineering, Stefan Roese <sr@denx.de>
10 *
11 * Some of that comes itself from a previous implementation for 440SPE only
12 * by Roland Dreier:
13 *
14 * Copyright (c) 2005 Cisco Systems. All rights reserved.
15 * Roland Dreier <rolandd@cisco.com>
16 *
17 */
18
19#undef DEBUG
20
21#include <linux/kernel.h>
22#include <linux/pci.h>
23#include <linux/init.h>
24#include <linux/of.h>
25#include <linux/delay.h>
26#include <linux/slab.h>
27
28#include <asm/io.h>
29#include <asm/pci-bridge.h>
30#include <asm/machdep.h>
31#include <asm/dcr.h>
32#include <asm/dcr-regs.h>
33#include <mm/mmu_decl.h>
34
35#include "ppc4xx_pci.h"
36
37static int dma_offset_set;
38
39#define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL))
40#define U64_TO_U32_HIGH(val) ((u32)((val) >> 32))
41
42#define RES_TO_U32_LOW(val) \
43 ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
44#define RES_TO_U32_HIGH(val) \
45 ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
46
47static inline int ppc440spe_revA(void)
48{
49 /* Catch both 440SPe variants, with and without RAID6 support */
50 if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
51 return 1;
52 else
53 return 0;
54}
55
56static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
57{
58 struct pci_controller *hose;
59 int i;
60
61 if (dev->devfn != 0 || dev->bus->self != NULL)
62 return;
63
64 hose = pci_bus_to_host(dev->bus);
65 if (hose == NULL)
66 return;
67
68 if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
69 !of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
70 !of_device_is_compatible(hose->dn, "ibm,plb-pci"))
71 return;
72
73 if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
74 of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
75 hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
76 }
77
78 /* Hide the PCI host BARs from the kernel as their content doesn't
79 * fit well in the resource management
80 */
81 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
82 dev->resource[i].start = dev->resource[i].end = 0;
83 dev->resource[i].flags = 0;
84 }
85
86 printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
87 pci_name(dev));
88}
89DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);
90
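/* Parse the optional "dma-ranges" property of the host bridge node to size
 * the inbound (PCI -> system memory) window; when the property is absent it
 * falls back to a 2GB prefetchable window at PCI address 0.
 */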
91static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
92 void __iomem *reg,
93 struct resource *res)
94{
95 u64 size;
96 const u32 *ranges;
97 int rlen;
98 int pna = of_n_addr_cells(hose->dn);
99 int np = pna + 5;
100
101 /* Default */
102 res->start = 0;
103 size = 0x80000000;
104 res->end = size - 1;
105 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
106
107 /* Get dma-ranges property */
108 ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
109 if (ranges == NULL)
110 goto out;
111
112 /* Walk it */
113 while ((rlen -= np * 4) >= 0) {
114 u32 pci_space = ranges[0];
115 u64 pci_addr = of_read_number(ranges + 1, 2);
116 u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
117 size = of_read_number(ranges + pna + 3, 2);
118 ranges += np;
119 if (cpu_addr == OF_BAD_ADDR || size == 0)
120 continue;
121
122 /* We only care about memory */
123 if ((pci_space & 0x03000000) != 0x02000000)
124 continue;
125
126 /* We currently only support memory at 0, and pci_addr
127 * within 32 bits space
128 */
129 if (cpu_addr != 0 || pci_addr > 0xffffffff) {
130 printk(KERN_WARNING "%s: Ignored unsupported dma range"
131 " 0x%016llx...0x%016llx -> 0x%016llx\n",
132 hose->dn->full_name,
133 pci_addr, pci_addr + size - 1, cpu_addr);
134 continue;
135 }
136
137 /* Check if not prefetchable */
138 if (!(pci_space & 0x40000000))
139 res->flags &= ~IORESOURCE_PREFETCH;
140
141
142 /* Use that */
143 res->start = pci_addr;
144 /* Beware of 32 bits resources */
145 if (sizeof(resource_size_t) == sizeof(u32) &&
146 (pci_addr + size) > 0x100000000ull)
147 res->end = 0xffffffff;
148 else
149 res->end = res->start + size - 1;
150 break;
151 }
152
153 /* We only support one global DMA offset */
154 if (dma_offset_set && pci_dram_offset != res->start) {
155 printk(KERN_ERR "%s: dma-ranges(s) mismatch\n",
156 hose->dn->full_name);
157 return -ENXIO;
158 }
159
160 /* Check that we can fit all of memory as we don't support
161 * DMA bounce buffers
162 */
163 if (size < total_memory) {
164 printk(KERN_ERR "%s: dma-ranges too small "
165 "(size=%llx total_memory=%llx)\n",
166 hose->dn->full_name, size, (u64)total_memory);
167 return -ENXIO;
168 }
169
170 /* Check we are a power of 2 size and that base is a multiple of size*/
171 if ((size & (size - 1)) != 0 ||
172 (res->start & (size - 1)) != 0) {
173 printk(KERN_ERR "%s: dma-ranges unaligned\n",
174 hose->dn->full_name);
175 return -ENXIO;
176 }
177
178 /* Check that we are fully contained within 32 bits space if we are not
179 * running on a 460sx or 476fpe which have 64 bit bus addresses.
180 */
181 if (res->end > 0xffffffff &&
182 !(of_device_is_compatible(hose->dn, "ibm,plb-pciex-460sx")
183 || of_device_is_compatible(hose->dn, "ibm,plb-pciex-476fpe"))) {
184 printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n",
185 hose->dn->full_name);
186 return -ENXIO;
187 }
188 out:
189 dma_offset_set = 1;
190 pci_dram_offset = res->start;
191 hose->dma_window_base_cur = res->start;
192 hose->dma_window_size = resource_size(res);
193
194 printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
195 pci_dram_offset);
196 printk(KERN_INFO "4xx PCI DMA window base to 0x%016llx\n",
197 (unsigned long long)hose->dma_window_base_cur);
198 printk(KERN_INFO "DMA window size 0x%016llx\n",
199 (unsigned long long)hose->dma_window_size);
200 return 0;
201}
202
203/*
204 * 4xx PCI 2.x part
205 */
206
207static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
208 void __iomem *reg,
209 u64 plb_addr,
210 u64 pci_addr,
211 u64 size,
212 unsigned int flags,
213 int index)
214{
215 u32 ma, pcila, pciha;
216
217 /* Hack warning ! The "old" PCI 2.x cell only let us configure the low
218 * 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit
219 * address are actually hard wired to a value that appears to depend
220 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
221 *
222 * The trick here is we just crop those top bits and ignore them when
223 * programming the chip. That means the device-tree has to be right
224 * for the specific part used (we don't print a warning if it's wrong
225 * but on the other hand, you'll crash quickly enough), but at least
226 * this code should work whatever the hard coded value is
227 */
228 plb_addr &= 0xffffffffull;
229
230 /* Note: Due to the above hack, the test below doesn't actually test
231 * if your address is above 4G, but it tests that address and
232 * (address + size) are both contained in the same 4G
233 */
234 if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
235 size < 0x1000 || (plb_addr & (size - 1)) != 0) {
236 printk(KERN_WARNING "%s: Resource out of range\n",
237 hose->dn->full_name);
238 return -1;
239 }
240 ma = (0xffffffffu << ilog2(size)) | 1;
241 if (flags & IORESOURCE_PREFETCH)
242 ma |= 2;
243
244 pciha = RES_TO_U32_HIGH(pci_addr);
245 pcila = RES_TO_U32_LOW(pci_addr);
246
247 writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
248 writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
249 writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
250 writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));
251
252 return 0;
253}
254
255static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
256 void __iomem *reg)
257{
258 int i, j, found_isa_hole = 0;
259
260 /* Setup outbound memory windows */
261 for (i = j = 0; i < 3; i++) {
262 struct resource *res = &hose->mem_resources[i];
263 resource_size_t offset = hose->mem_offset[i];
264
265 /* we only care about memory windows */
266 if (!(res->flags & IORESOURCE_MEM))
267 continue;
268 if (j > 2) {
269 printk(KERN_WARNING "%s: Too many ranges\n",
270 hose->dn->full_name);
271 break;
272 }
273
274 /* Configure the resource */
275 if (ppc4xx_setup_one_pci_PMM(hose, reg,
276 res->start,
277 res->start - offset,
278 resource_size(res),
279 res->flags,
280 j) == 0) {
281 j++;
282
283 /* If the resource PCI address is 0 then we have our
284 * ISA memory hole
285 */
286 if (res->start == offset)
287 found_isa_hole = 1;
288 }
289 }
290
291 /* Handle ISA memory hole if not already covered */
292 if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
293 if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
294 hose->isa_mem_size, 0, j) == 0)
295 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
296 hose->dn->full_name);
297}
298
299static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
300 void __iomem *reg,
301 const struct resource *res)
302{
303 resource_size_t size = resource_size(res);
304 u32 sa;
305
306 /* Calculate window size */
307 sa = (0xffffffffu << ilog2(size)) | 1;
308 sa |= 0x1;
309
310 /* RAM is always at 0 local for now */
311 writel(0, reg + PCIL0_PTM1LA);
312 writel(sa, reg + PCIL0_PTM1MS);
313
314 /* Map on PCI side */
315 early_write_config_dword(hose, hose->first_busno, 0,
316 PCI_BASE_ADDRESS_1, res->start);
317 early_write_config_dword(hose, hose->first_busno, 0,
318 PCI_BASE_ADDRESS_2, 0x00000000);
319 early_write_config_word(hose, hose->first_busno, 0,
320 PCI_COMMAND, 0x0006);
321}
322
323static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
324{
325 /* NYI */
326 struct resource rsrc_cfg;
327 struct resource rsrc_reg;
328 struct resource dma_window;
329 struct pci_controller *hose = NULL;
330 void __iomem *reg = NULL;
331 const int *bus_range;
332 int primary = 0;
333
334 /* Check if device is enabled */
335 if (!of_device_is_available(np)) {
336 printk(KERN_INFO "%s: Port disabled via device-tree\n",
337 np->full_name);
338 return;
339 }
340
341 /* Fetch config space registers address */
342 if (of_address_to_resource(np, 0, &rsrc_cfg)) {
343 printk(KERN_ERR "%s: Can't get PCI config register base !",
344 np->full_name);
345 return;
346 }
347 /* Fetch host bridge internal registers address */
348 if (of_address_to_resource(np, 3, &rsrc_reg)) {
349 printk(KERN_ERR "%s: Can't get PCI internal register base !",
350 np->full_name);
351 return;
352 }
353
354 /* Check if primary bridge */
355 if (of_get_property(np, "primary", NULL))
356 primary = 1;
357
358 /* Get bus range if any */
359 bus_range = of_get_property(np, "bus-range", NULL);
360
361 /* Map registers */
362 reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
363 if (reg == NULL) {
364 printk(KERN_ERR "%s: Can't map registers !", np->full_name);
365 goto fail;
366 }
367
368 /* Allocate the host controller data structure */
369 hose = pcibios_alloc_controller(np);
370 if (!hose)
371 goto fail;
372
373 hose->first_busno = bus_range ? bus_range[0] : 0x0;
374 hose->last_busno = bus_range ? bus_range[1] : 0xff;
375
376 /* Setup config space */
377 setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);
378
379 /* Disable all windows */
380 writel(0, reg + PCIL0_PMM0MA);
381 writel(0, reg + PCIL0_PMM1MA);
382 writel(0, reg + PCIL0_PMM2MA);
383 writel(0, reg + PCIL0_PTM1MS);
384 writel(0, reg + PCIL0_PTM2MS);
385
386 /* Parse outbound mapping resources */
387 pci_process_bridge_OF_ranges(hose, np, primary);
388
389 /* Parse inbound mapping resources */
390 if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
391 goto fail;
392
393 /* Configure outbound ranges POMs */
394 ppc4xx_configure_pci_PMMs(hose, reg);
395
396 /* Configure inbound ranges PIMs */
397 ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);
398
399 /* We don't need the registers anymore */
400 iounmap(reg);
401 return;
402
403 fail:
404 if (hose)
405 pcibios_free_controller(hose);
406 if (reg)
407 iounmap(reg);
408}
409
410/*
411 * 4xx PCI-X part
412 */
413
414static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
415 void __iomem *reg,
416 u64 plb_addr,
417 u64 pci_addr,
418 u64 size,
419 unsigned int flags,
420 int index)
421{
422 u32 lah, lal, pciah, pcial, sa;
423
424 if (!is_power_of_2(size) || size < 0x1000 ||
425 (plb_addr & (size - 1)) != 0) {
426 printk(KERN_WARNING "%s: Resource out of range\n",
427 hose->dn->full_name);
428 return -1;
429 }
430
431 /* Calculate register values */
432 lah = RES_TO_U32_HIGH(plb_addr);
433 lal = RES_TO_U32_LOW(plb_addr);
434 pciah = RES_TO_U32_HIGH(pci_addr);
435 pcial = RES_TO_U32_LOW(pci_addr);
436 sa = (0xffffffffu << ilog2(size)) | 0x1;
437
438 /* Program register values */
439 if (index == 0) {
440 writel(lah, reg + PCIX0_POM0LAH);
441 writel(lal, reg + PCIX0_POM0LAL);
442 writel(pciah, reg + PCIX0_POM0PCIAH);
443 writel(pcial, reg + PCIX0_POM0PCIAL);
444 writel(sa, reg + PCIX0_POM0SA);
445 } else {
446 writel(lah, reg + PCIX0_POM1LAH);
447 writel(lal, reg + PCIX0_POM1LAL);
448 writel(pciah, reg + PCIX0_POM1PCIAH);
449 writel(pcial, reg + PCIX0_POM1PCIAL);
450 writel(sa, reg + PCIX0_POM1SA);
451 }
452
453 return 0;
454}
455
456static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
457 void __iomem *reg)
458{
459 int i, j, found_isa_hole = 0;
460
461 /* Setup outbound memory windows */
462 for (i = j = 0; i < 3; i++) {
463 struct resource *res = &hose->mem_resources[i];
464 resource_size_t offset = hose->mem_offset[i];
465
466 /* we only care about memory windows */
467 if (!(res->flags & IORESOURCE_MEM))
468 continue;
469 if (j > 1) {
470 printk(KERN_WARNING "%s: Too many ranges\n",
471 hose->dn->full_name);
472 break;
473 }
474
475 /* Configure the resource */
476 if (ppc4xx_setup_one_pcix_POM(hose, reg,
477 res->start,
478 res->start - offset,
479 resource_size(res),
480 res->flags,
481 j) == 0) {
482 j++;
483
484 /* If the resource PCI address is 0 then we have our
485 * ISA memory hole
486 */
487 if (res->start == offset)
488 found_isa_hole = 1;
489 }
490 }
491
492 /* Handle ISA memory hole if not already covered */
493 if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
494 if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
495 hose->isa_mem_size, 0, j) == 0)
496 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
497 hose->dn->full_name);
498}
499
500static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
501 void __iomem *reg,
502 const struct resource *res,
503 int big_pim,
504 int enable_msi_hole)
505{
506 resource_size_t size = resource_size(res);
507 u32 sa;
508
509 /* RAM is always at 0 */
510 writel(0x00000000, reg + PCIX0_PIM0LAH);
511 writel(0x00000000, reg + PCIX0_PIM0LAL);
512
513 /* Calculate window size */
514 sa = (0xffffffffu << ilog2(size)) | 1;
515 sa |= 0x1;
516 if (res->flags & IORESOURCE_PREFETCH)
517 sa |= 0x2;
518 if (enable_msi_hole)
519 sa |= 0x4;
520 writel(sa, reg + PCIX0_PIM0SA);
521 if (big_pim)
522 writel(0xffffffff, reg + PCIX0_PIM0SAH);
523
524 /* Map on PCI side */
525 writel(0x00000000, reg + PCIX0_BAR0H);
526 writel(res->start, reg + PCIX0_BAR0L);
527 writew(0x0006, reg + PCIX0_COMMAND);
528}
529
530static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
531{
532 struct resource rsrc_cfg;
533 struct resource rsrc_reg;
534 struct resource dma_window;
535 struct pci_controller *hose = NULL;
536 void __iomem *reg = NULL;
537 const int *bus_range;
538 int big_pim = 0, msi = 0, primary = 0;
539
540 /* Fetch config space registers address */
541 if (of_address_to_resource(np, 0, &rsrc_cfg)) {
542 printk(KERN_ERR "%s:Can't get PCI-X config register base !",
543 np->full_name);
544 return;
545 }
546 /* Fetch host bridge internal registers address */
547 if (of_address_to_resource(np, 3, &rsrc_reg)) {
548 printk(KERN_ERR "%s: Can't get PCI-X internal register base !",
549 np->full_name);
550 return;
551 }
552
553 /* Check if it supports large PIMs (440GX) */
554 if (of_get_property(np, "large-inbound-windows", NULL))
555 big_pim = 1;
556
557 /* Check if we should enable MSIs inbound hole */
558 if (of_get_property(np, "enable-msi-hole", NULL))
559 msi = 1;
560
561 /* Check if primary bridge */
562 if (of_get_property(np, "primary", NULL))
563 primary = 1;
564
565 /* Get bus range if any */
566 bus_range = of_get_property(np, "bus-range", NULL);
567
568 /* Map registers */
569 reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
570 if (reg == NULL) {
571 printk(KERN_ERR "%s: Can't map registers !", np->full_name);
572 goto fail;
573 }
574
575 /* Allocate the host controller data structure */
576 hose = pcibios_alloc_controller(np);
577 if (!hose)
578 goto fail;
579
580 hose->first_busno = bus_range ? bus_range[0] : 0x0;
581 hose->last_busno = bus_range ? bus_range[1] : 0xff;
582
583 /* Setup config space */
584 setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4,
585 PPC_INDIRECT_TYPE_SET_CFG_TYPE);
586
587 /* Disable all windows */
588 writel(0, reg + PCIX0_POM0SA);
589 writel(0, reg + PCIX0_POM1SA);
590 writel(0, reg + PCIX0_POM2SA);
591 writel(0, reg + PCIX0_PIM0SA);
592 writel(0, reg + PCIX0_PIM1SA);
593 writel(0, reg + PCIX0_PIM2SA);
594 if (big_pim) {
595 writel(0, reg + PCIX0_PIM0SAH);
596 writel(0, reg + PCIX0_PIM2SAH);
597 }
598
599 /* Parse outbound mapping resources */
600 pci_process_bridge_OF_ranges(hose, np, primary);
601
602 /* Parse inbound mapping resources */
603 if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
604 goto fail;
605
606 /* Configure outbound ranges POMs */
607 ppc4xx_configure_pcix_POMs(hose, reg);
608
609 /* Configure inbound ranges PIMs */
610 ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);
611
612 /* We don't need the registers anymore */
613 iounmap(reg);
614 return;
615
616 fail:
617 if (hose)
618 pcibios_free_controller(hose);
619 if (reg)
620 iounmap(reg);
621}
622
623#ifdef CONFIG_PPC4xx_PCI_EXPRESS
624
625/*
626 * 4xx PCI-Express part
627 *
628 * We support 3 parts currently based on the compatible property:
629 *
630 * ibm,plb-pciex-440spe
631 * ibm,plb-pciex-405ex
632 * ibm,plb-pciex-460ex
633 *
634 * Anything else will be rejected for now as they are all subtly
635 * different unfortunately.
636 *
637 */
638
639#define MAX_PCIE_BUS_MAPPED 0x40
640
641struct ppc4xx_pciex_port
642{
643 struct pci_controller *hose;
644 struct device_node *node;
645 unsigned int index;
646 int endpoint;
647 int link;
648 int has_ibpre;
649 unsigned int sdr_base;
650 dcr_host_t dcrs;
651 struct resource cfg_space;
652 struct resource utl_regs;
653 void __iomem *utl_base;
654};
655
656static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
657static unsigned int ppc4xx_pciex_port_count;
658
659struct ppc4xx_pciex_hwops
660{
661 bool want_sdr;
662 int (*core_init)(struct device_node *np);
663 int (*port_init_hw)(struct ppc4xx_pciex_port *port);
664 int (*setup_utl)(struct ppc4xx_pciex_port *port);
665 void (*check_link)(struct ppc4xx_pciex_port *port);
666};
667
668static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
669
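/* Poll an SDR register at port->sdr_base + sdr_offset until the masked value
 * matches, sleeping 1ms per iteration; returns 0 on match, -1 on timeout.
 */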
670static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
671 unsigned int sdr_offset,
672 unsigned int mask,
673 unsigned int value,
674 int timeout_ms)
675{
676 u32 val;
677
678 while(timeout_ms--) {
679 val = mfdcri(SDR0, port->sdr_base + sdr_offset);
680 if ((val & mask) == value) {
681 pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
682 port->index, sdr_offset, timeout_ms, val);
683 return 0;
684 }
685 msleep(1);
686 }
687 return -1;
688}
689
690static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
691{
692 /* Wait for reset to complete */
693 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
694 printk(KERN_WARNING "PCIE%d: PGRST failed\n",
695 port->index);
696 return -1;
697 }
698 return 0;
699}
700
701
702static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
703{
704 printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
705
706 /* Check for card presence detect if supported, if not, just wait for
707 * link unconditionally.
708 *
709 * note that we don't fail if there is no link, we just filter out
710 * config space accesses. That way, it will be easier to implement
711 * hotplug later on.
712 */
713 if (!port->has_ibpre ||
714 !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
715 1 << 28, 1 << 28, 100)) {
716 printk(KERN_INFO
717 "PCIE%d: Device detected, waiting for link...\n",
718 port->index);
719 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
720 0x1000, 0x1000, 2000))
721 printk(KERN_WARNING
722 "PCIE%d: Link up failed\n", port->index);
723 else {
724 printk(KERN_INFO
725 "PCIE%d: link is up !\n", port->index);
726 port->link = 1;
727 }
728 } else
729 printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
730}
731
732#ifdef CONFIG_44x
733
734/* Check various reset bits of the 440SPe PCIe core */
735static int __init ppc440spe_pciex_check_reset(struct device_node *np)
736{
737 u32 valPE0, valPE1, valPE2;
738 int err = 0;
739
740 /* SDR0_PEGPLLLCT1 reset */
741 if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
742 /*
743 * the PCIe core was probably already initialised
744 * by firmware - let's re-reset RCSSET regs
745 *
746 * -- Shouldn't we also re-reset the whole thing ? -- BenH
747 */
748 pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
749 mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
750 mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
751 mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
752 }
753
754 valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
755 valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
756 valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);
757
758 /* SDR0_PExRCSSET rstgu */
759 if (!(valPE0 & 0x01000000) ||
760 !(valPE1 & 0x01000000) ||
761 !(valPE2 & 0x01000000)) {
762 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
763 err = -1;
764 }
765
766 /* SDR0_PExRCSSET rstdl */
767 if (!(valPE0 & 0x00010000) ||
768 !(valPE1 & 0x00010000) ||
769 !(valPE2 & 0x00010000)) {
770 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
771 err = -1;
772 }
773
774 /* SDR0_PExRCSSET rstpyn */
775 if ((valPE0 & 0x00001000) ||
776 (valPE1 & 0x00001000) ||
777 (valPE2 & 0x00001000)) {
778 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
779 err = -1;
780 }
781
782 /* SDR0_PExRCSSET hldplb */
783 if ((valPE0 & 0x10000000) ||
784 (valPE1 & 0x10000000) ||
785 (valPE2 & 0x10000000)) {
786 printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
787 err = -1;
788 }
789
790 /* SDR0_PExRCSSET rdy */
791 if ((valPE0 & 0x00100000) ||
792 (valPE1 & 0x00100000) ||
793 (valPE2 & 0x00100000)) {
794 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
795 err = -1;
796 }
797
798 /* SDR0_PExRCSSET shutdown */
799 if ((valPE0 & 0x00000100) ||
800 (valPE1 & 0x00000100) ||
801 (valPE2 & 0x00000100)) {
802 printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
803 err = -1;
804 }
805
806 return err;
807}
808
809/* Global PCIe core initializations for 440SPe core */
810static int __init ppc440spe_pciex_core_init(struct device_node *np)
811{
812 int time_out = 20;
813
814 /* Set PLL clock receiver to LVPECL */
815 dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);
816
817 /* Shouldn't we do all the calibration stuff etc... here ? */
818 if (ppc440spe_pciex_check_reset(np))
819 return -ENXIO;
820
821 if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
822 printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
823 "failed (0x%08x)\n",
824 mfdcri(SDR0, PESDR0_PLLLCT2));
825 return -1;
826 }
827
828 /* De-assert reset of PCIe PLL, wait for lock */
829 dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
830 udelay(3);
831
832 while (time_out) {
833 if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
834 time_out--;
835 udelay(1);
836 } else
837 break;
838 }
839 if (!time_out) {
840 printk(KERN_INFO "PCIE: VCO output not locked\n");
841 return -1;
842 }
843
844 pr_debug("PCIE initialization OK\n");
845
846 return 3;
847}
848
849static int __init ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
850{
851 u32 val = 1 << 24;
852
853 if (port->endpoint)
854 val = PTYPE_LEGACY_ENDPOINT << 20;
855 else
856 val = PTYPE_ROOT_PORT << 20;
857
858 if (port->index == 0)
859 val |= LNKW_X8 << 12;
860 else
861 val |= LNKW_X4 << 12;
862
863 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
864 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
865 if (ppc440spe_revA())
866 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
867 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
868 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
869 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
870 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
871 if (port->index == 0) {
872 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
873 0x35000000);
874 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
875 0x35000000);
876 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
877 0x35000000);
878 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
879 0x35000000);
880 }
881 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
882 (1 << 24) | (1 << 16), 1 << 12);
883
884 return ppc4xx_pciex_port_reset_sdr(port);
885}
886
887static int __init ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
888{
889 return ppc440spe_pciex_init_port_hw(port);
890}
891
892static int __init ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
893{
894 int rc = ppc440spe_pciex_init_port_hw(port);
895
896 port->has_ibpre = 1;
897
898 return rc;
899}
900
901static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
902{
903 /* XXX Check what that value means... I hate magic */
904 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);
905
906 /*
907 * Set buffer allocations and then assert VRB and TXE.
908 */
909 out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
910 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
911 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x10000000);
912 out_be32(port->utl_base + PEUTL_PBBSZ, 0x53000000);
913 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x08000000);
914 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x10000000);
915 out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
916 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
917
918 return 0;
919}
920
921static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
922{
923 /* Report CRS to the operating system */
924 out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
925
926 return 0;
927}
928
929static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
930{
931 .want_sdr = true,
932 .core_init = ppc440spe_pciex_core_init,
933 .port_init_hw = ppc440speA_pciex_init_port_hw,
934 .setup_utl = ppc440speA_pciex_init_utl,
935 .check_link = ppc4xx_pciex_check_link_sdr,
936};
937
938static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
939{
940 .want_sdr = true,
941 .core_init = ppc440spe_pciex_core_init,
942 .port_init_hw = ppc440speB_pciex_init_port_hw,
943 .setup_utl = ppc440speB_pciex_init_utl,
944 .check_link = ppc4xx_pciex_check_link_sdr,
945};
946
947static int __init ppc460ex_pciex_core_init(struct device_node *np)
948{
949 /* Nothing to do, return 2 ports */
950 return 2;
951}
952
953static int __init ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
954{
955 u32 val;
956 u32 utlset1;
957
958 if (port->endpoint)
959 val = PTYPE_LEGACY_ENDPOINT << 20;
960 else
961 val = PTYPE_ROOT_PORT << 20;
962
963 if (port->index == 0) {
964 val |= LNKW_X1 << 12;
965 utlset1 = 0x20000000;
966 } else {
967 val |= LNKW_X4 << 12;
968 utlset1 = 0x20101101;
969 }
970
971 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
972 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
973 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);
974
975 switch (port->index) {
976 case 0:
977 mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
978 mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
979 mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
980
981 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000);
982 break;
983
984 case 1:
985 mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
986 mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
987 mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
988 mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
989 mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
990 mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
991 mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
992 mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
993 mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
994 mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
995 mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
996 mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);
997
998 mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000);
999 break;
1000 }
1001
1002 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1003 mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
1004 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
1005
1006 /* Poll for PHY reset */
1007 /* XXX FIXME add timeout */
1008 switch (port->index) {
1009 case 0:
1010 while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
1011 udelay(10);
1012 break;
1013 case 1:
1014 while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
1015 udelay(10);
1016 break;
1017 }
1018
1019 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1020 (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
1021 ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
1022 PESDRx_RCSSET_RSTPYN);
1023
1024 port->has_ibpre = 1;
1025
1026 return ppc4xx_pciex_port_reset_sdr(port);
1027}
1028
1029static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1030{
1031 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1032
1033 /*
1034 * Set buffer allocations and then assert VRB and TXE.
1035 */
1036 out_be32(port->utl_base + PEUTL_PBCTL, 0x0800000c);
1037 out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
1038 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
1039 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
1040 out_be32(port->utl_base + PEUTL_PBBSZ, 0x00000000);
1041 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
1042 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
1043 out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000);
1044 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
1045
1046 return 0;
1047}
1048
1049static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
1050{
1051 .want_sdr = true,
1052 .core_init = ppc460ex_pciex_core_init,
1053 .port_init_hw = ppc460ex_pciex_init_port_hw,
1054 .setup_utl = ppc460ex_pciex_init_utl,
1055 .check_link = ppc4xx_pciex_check_link_sdr,
1056};
1057
1058static int __init apm821xx_pciex_core_init(struct device_node *np)
1059{
1060 /* Return the number of pcie port */
1061 return 1;
1062}
1063
1064static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1065{
1066 u32 val;
1067
1068 /*
1069 * Do a software reset on PCIe ports.
1070 * This code is to fix the issue that PCI drivers don't re-assign
1071 * bus numbers for PCIE devices after U-Boot has
1072 * scanned and configured all the buses (eg. PCIE NIC IntelPro/1000
1073 * PT quad port, SAS LSI 1064E)
1074 */
1075
1076 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
1077 mdelay(10);
1078
1079 if (port->endpoint)
1080 val = PTYPE_LEGACY_ENDPOINT << 20;
1081 else
1082 val = PTYPE_ROOT_PORT << 20;
1083
1084 val |= LNKW_X1 << 12;
1085
1086 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
1087 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
1088 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
1089
1090 mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
1091 mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
1092 mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
1093
1094 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
1095 mdelay(50);
1096 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);
1097
1098 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1099 mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
1100 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
1101
1102 /* Poll for PHY reset */
1103 val = PESDR0_460EX_RSTSTA - port->sdr_base;
1104 if (ppc4xx_pciex_wait_on_sdr(port, val, 0x1, 1, 100)) {
1105 printk(KERN_WARNING "%s: PCIE: Can't reset PHY\n", __func__);
1106 return -EBUSY;
1107 } else {
1108 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1109 (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
1110 ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
1111 PESDRx_RCSSET_RSTPYN);
1112
1113 port->has_ibpre = 1;
1114 return 0;
1115 }
1116}
1117
1118static struct ppc4xx_pciex_hwops apm821xx_pcie_hwops __initdata = {
1119 .want_sdr = true,
1120 .core_init = apm821xx_pciex_core_init,
1121 .port_init_hw = apm821xx_pciex_init_port_hw,
1122 .setup_utl = ppc460ex_pciex_init_utl,
1123 .check_link = ppc4xx_pciex_check_link_sdr,
1124};
1125
1126static int __init ppc460sx_pciex_core_init(struct device_node *np)
1127{
1128 /* HSS drive amplitude */
1129 mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
1130 mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
1131 mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
1132 mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
1133 mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
1134 mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
1135 mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
1136 mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);
1137
1138 mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
1139 mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
1140 mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
1141 mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);
1142
1143 mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
1144 mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
1145 mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
1146 mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);
1147
1148 /* HSS TX pre-emphasis */
1149 mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
1150 mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
1151 mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
1152 mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
1153 mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
1154 mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
1155 mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
1156 mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);
1157
1158 mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
1159 mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
1160 mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
1161 mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);
1162
1163 mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
1164 mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
1165 mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
1166 mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);
1167
1168 /* HSS TX calibration control */
1169 mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
1170 mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
1171 mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);
1172
1173 /* HSS TX slew control */
1174 mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
1175 mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
1176 mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);
1177
1178 /* Set HSS PRBS enabled */
1179 mtdcri(SDR0, PESDR0_460SX_HSSCTLSET, 0x00001130);
1180 mtdcri(SDR0, PESDR2_460SX_HSSCTLSET, 0x00001130);
1181
1182 udelay(100);
1183
1184 /* De-assert PLLRESET */
1185 dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);
1186
1187 /* Reset DL, UTL, GPL before configuration */
1188 mtdcri(SDR0, PESDR0_460SX_RCSSET,
1189 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1190 mtdcri(SDR0, PESDR1_460SX_RCSSET,
1191 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1192 mtdcri(SDR0, PESDR2_460SX_RCSSET,
1193 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1194
1195 udelay(100);
1196
1197 /*
1198 * If bifurcation is not enabled, u-boot would have disabled the
1199 * third PCIe port
1200 */
1201 if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
1202 0x00000001)) {
1203 printk(KERN_INFO "PCI: PCIE bifurcation setup successfully.\n");
1204 printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
1205 return 3;
1206 }
1207
1208 printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
1209 return 2;
1210}
1211
1212static int __init ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1213{
1214
1215 if (port->endpoint)
1216 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1217 0x01000000, 0);
1218 else
1219 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1220 0, 0x01000000);
1221
1222 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
1223 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
1224 PESDRx_RCSSET_RSTPYN);
1225
1226 port->has_ibpre = 1;
1227
1228 return ppc4xx_pciex_port_reset_sdr(port);
1229}
1230
1231static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
1232{
1233 /* Max 128 Bytes */
1234 out_be32 (port->utl_base + PEUTL_PBBSZ, 0x00000000);
1235 /* Assert VRB and TXE - per datasheet turn off addr validation */
1236 out_be32(port->utl_base + PEUTL_PCTL, 0x80800000);
1237 return 0;
1238}
1239
1240static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
1241{
1242 void __iomem *mbase;
1243 int attempt = 50;
1244
1245 port->link = 0;
1246
1247 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1248 if (mbase == NULL) {
1249 printk(KERN_ERR "%s: Can't map internal config space !",
1250 port->node->full_name);
1251 goto done;
1252 }
1253
1254 while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
1255 & PECFG_460SX_DLLSTA_LINKUP))) {
1256 attempt--;
1257 mdelay(10);
1258 }
1259 if (attempt)
1260 port->link = 1;
1261done:
1262 iounmap(mbase);
1263
1264}
1265
1266static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
1267 .want_sdr = true,
1268 .core_init = ppc460sx_pciex_core_init,
1269 .port_init_hw = ppc460sx_pciex_init_port_hw,
1270 .setup_utl = ppc460sx_pciex_init_utl,
1271 .check_link = ppc460sx_pciex_check_link,
1272};
1273
1274#endif /* CONFIG_44x */
1275
1276#ifdef CONFIG_40x
1277
1278static int __init ppc405ex_pciex_core_init(struct device_node *np)
1279{
1280 /* Nothing to do, return 2 ports */
1281 return 2;
1282}
1283
1284static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
1285{
1286 /* Assert the PE0_PHY reset */
1287 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
1288 msleep(1);
1289
1290 /* deassert the PE0_hotreset */
1291 if (port->endpoint)
1292 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
1293 else
1294 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
1295
1296 /* poll for phy !reset */
1297 /* XXX FIXME add timeout */
1298 while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
1299 ;
1300
1301 /* deassert the PE0_gpl_utl_reset */
1302 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
1303}
1304
1305static int __init ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1306{
1307 u32 val;
1308
1309 if (port->endpoint)
1310 val = PTYPE_LEGACY_ENDPOINT;
1311 else
1312 val = PTYPE_ROOT_PORT;
1313
1314 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
1315 1 << 24 | val << 20 | LNKW_X1 << 12);
1316
1317 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
1318 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
1319 mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
1320 mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);
1321
1322 /*
1323 * Only reset the PHY when no link is currently established.
1324 * This is for the Atheros PCIe board which has problems to establish
1325 * the link (again) after this PHY reset. All other currently tested
1326 * PCIe boards don't show this problem.
1327 * This has to be re-tested and fixed in a later release!
1328 */
1329 val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
1330 if (!(val & 0x00001000))
1331 ppc405ex_pcie_phy_reset(port);
1332
1333 dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000); /* guarded on */
1334
1335 port->has_ibpre = 1;
1336
1337 return ppc4xx_pciex_port_reset_sdr(port);
1338}
1339
1340static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1341{
1342 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1343
1344 /*
1345 * Set buffer allocations and then assert VRB and TXE.
1346 */
1347 out_be32(port->utl_base + PEUTL_OUTTR, 0x02000000);
1348 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
1349 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
1350 out_be32(port->utl_base + PEUTL_PBBSZ, 0x21000000);
1351 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
1352 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
1353 out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
1354 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
1355
1356 out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
1357
1358 return 0;
1359}
1360
1361static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
1362{
1363 .want_sdr = true,
1364 .core_init = ppc405ex_pciex_core_init,
1365 .port_init_hw = ppc405ex_pciex_init_port_hw,
1366 .setup_utl = ppc405ex_pciex_init_utl,
1367 .check_link = ppc4xx_pciex_check_link_sdr,
1368};
1369
1370#endif /* CONFIG_40x */
1371
1372#ifdef CONFIG_476FPE
1373static int __init ppc_476fpe_pciex_core_init(struct device_node *np)
1374{
1375 return 4;
1376}
1377
1378static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
1379{
1380 u32 timeout_ms = 20;
1381 u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT);
1382 void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000,
1383 0x1000);
1384
1385 printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
1386
1387 if (mbase == NULL) {
1388 printk(KERN_WARNING "PCIE%d: failed to get cfg space\n",
1389 port->index);
1390 return;
1391 }
1392
1393 while (timeout_ms--) {
1394 val = in_le32(mbase + PECFG_TLDLP);
1395
1396 if ((val & mask) == mask)
1397 break;
1398 msleep(10);
1399 }
1400
1401 if (val & PECFG_TLDLP_PRESENT) {
1402 printk(KERN_INFO "PCIE%d: link is up !\n", port->index);
1403 port->link = 1;
1404 } else
1405 printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index);
1406
1407 iounmap(mbase);
1408 return;
1409}
1410
1411static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata =
1412{
1413 .core_init = ppc_476fpe_pciex_core_init,
1414 .check_link = ppc_476fpe_pciex_check_link,
1415};
1416#endif /* CONFIG_476FPE */
1417
1418/* Check that the core has been initialized and if not, do it */
1419static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
1420{
1421 static int core_init;
1422 int count = -ENODEV;
1423
1424 if (core_init++)
1425 return 0;
1426
1427#ifdef CONFIG_44x
1428 if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
1429 if (ppc440spe_revA())
1430 ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
1431 else
1432 ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
1433 }
1434 if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
1435 ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
1436 if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
1437 ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
1438 if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx"))
1439 ppc4xx_pciex_hwops = &apm821xx_pcie_hwops;
1440#endif /* CONFIG_44x */
1441#ifdef CONFIG_40x
1442 if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
1443 ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
1444#endif
1445#ifdef CONFIG_476FPE
1446 if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe")
1447 || of_device_is_compatible(np, "ibm,plb-pciex-476gtr"))
1448 ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops;
1449#endif
1450 if (ppc4xx_pciex_hwops == NULL) {
1451 printk(KERN_WARNING "PCIE: unknown host type %s\n",
1452 np->full_name);
1453 return -ENODEV;
1454 }
1455
1456 count = ppc4xx_pciex_hwops->core_init(np);
1457 if (count > 0) {
1458 ppc4xx_pciex_ports =
1459 kzalloc(count * sizeof(struct ppc4xx_pciex_port),
1460 GFP_KERNEL);
1461 if (ppc4xx_pciex_ports) {
1462 ppc4xx_pciex_port_count = count;
1463 return 0;
1464 }
1465 printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
1466 return -ENOMEM;
1467 }
1468 return -ENODEV;
1469}
1470
1471static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
1472{
1473 /* We map PCI Express configuration based on the reg property */
1474 dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
1475 RES_TO_U32_HIGH(port->cfg_space.start));
1476 dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
1477 RES_TO_U32_LOW(port->cfg_space.start));
1478
1479 /* XXX FIXME: Use size from reg property. For now, map 512M */
1480 dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);
1481
1482 /* We map UTL registers based on the reg property */
1483 dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
1484 RES_TO_U32_HIGH(port->utl_regs.start));
1485 dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
1486 RES_TO_U32_LOW(port->utl_regs.start));
1487
1488 /* XXX FIXME: Use size from reg property */
1489 dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);
1490
1491 /* Disable all other outbound windows */
1492 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
1493 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
1494 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
1495 dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
1496}
1497
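/* Bring up a single PCIe port: run the SoC-specific PHY/SDR init, program
 * the DCR mappings for the config and UTL register spaces, check the link,
 * then map and initialize the UTL registers.
 */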
1498static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
1499{
1500 int rc = 0;
1501
1502 /* Init HW */
1503 if (ppc4xx_pciex_hwops->port_init_hw)
1504 rc = ppc4xx_pciex_hwops->port_init_hw(port);
1505 if (rc != 0)
1506 return rc;
1507
1508 /*
1509 * Initialize mapping: disable all regions and configure
1510 * CFG and REG regions based on resources in the device tree
1511 */
1512 ppc4xx_pciex_port_init_mapping(port);
1513
1514 if (ppc4xx_pciex_hwops->check_link)
1515 ppc4xx_pciex_hwops->check_link(port);
1516
1517 /*
1518 * Map UTL
1519 */
1520 port->utl_base = ioremap(port->utl_regs.start, 0x100);
1521 BUG_ON(port->utl_base == NULL);
1522
1523 /*
1524 * Setup UTL registers --BenH.
1525 */
1526 if (ppc4xx_pciex_hwops->setup_utl)
1527 ppc4xx_pciex_hwops->setup_utl(port);
1528
1529 /*
1530 * Check for VC0 active or PLL Locked and assert RDY.
1531 */
1532 if (port->sdr_base) {
1533 if (of_device_is_compatible(port->node,
1534 "ibm,plb-pciex-460sx")){
1535 if (port->link && ppc4xx_pciex_wait_on_sdr(port,
1536 PESDRn_RCSSTS,
1537 1 << 12, 1 << 12, 5000)) {
1538 printk(KERN_INFO "PCIE%d: PLL not locked\n",
1539 port->index);
1540 port->link = 0;
1541 }
1542 } else if (port->link &&
1543 ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
1544 1 << 16, 1 << 16, 5000)) {
1545 printk(KERN_INFO "PCIE%d: VC0 not active\n",
1546 port->index);
1547 port->link = 0;
1548 }
1549
1550 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
1551 }
1552
1553 msleep(100);
1554
1555 return 0;
1556}
1557
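/* Reject config cycles that cannot work on this port: accesses beyond the
 * mapped bus range, to a non-zero devfn on the root bus, to a non-zero slot
 * on the bus directly below the root port, or to downstream buses when no
 * link is up.
 */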
1558static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
1559 struct pci_bus *bus,
1560 unsigned int devfn)
1561{
1562 static int message;
1563
1564 /* Endpoint cannot generate upstream (remote) config cycles */
1565 if (port->endpoint && bus->number != port->hose->first_busno)
1566 return PCIBIOS_DEVICE_NOT_FOUND;
1567
1568 /* Check we are within the mapped range */
1569 if (bus->number > port->hose->last_busno) {
1570 if (!message) {
1571 printk(KERN_WARNING "Warning! Probing bus %u"
1572 " out of range !\n", bus->number);
1573 message++;
1574 }
1575 return PCIBIOS_DEVICE_NOT_FOUND;
1576 }
1577
1578 /* The root complex has only one device / function */
1579 if (bus->number == port->hose->first_busno && devfn != 0)
1580 return PCIBIOS_DEVICE_NOT_FOUND;
1581
1582 /* The other side of the RC has only one device as well */
1583 if (bus->number == (port->hose->first_busno + 1) &&
1584 PCI_SLOT(devfn) != 0)
1585 return PCIBIOS_DEVICE_NOT_FOUND;
1586
1587 /* Check if we have a link */
1588 if ((bus->number != port->hose->first_busno) && !port->link)
1589 return PCIBIOS_DEVICE_NOT_FOUND;
1590
a2d2e1ec
BH
1591 return 0;
1592}
1593
1594static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
1595 struct pci_bus *bus,
1596 unsigned int devfn)
1597{
1598 int relbus;
1599
1600 /* Remove the casts when we finally remove the stupid volatile
1601 * in struct pci_controller
1602 */
1603 if (bus->number == port->hose->first_busno)
1604 return (void __iomem *)port->hose->cfg_addr;
1605
1606 relbus = bus->number - (port->hose->first_busno + 1);
1607 return (void __iomem *)port->hose->cfg_data +
1608 ((relbus << 20) | (devfn << 12));
1609}
1610
1611static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
1612 int offset, int len, u32 *val)
1613{
1614 struct pci_controller *hose = pci_bus_to_host(bus);
1615 struct ppc4xx_pciex_port *port =
1616 &ppc4xx_pciex_ports[hose->indirect_type];
1617 void __iomem *addr;
1618 u32 gpl_cfg;
1619
1620 BUG_ON(hose != port->hose);
1621
1622 if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1623 return PCIBIOS_DEVICE_NOT_FOUND;
1624
1625 addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1626
1627 /*
1628 * Reading from configuration space of non-existing device can
1629 * generate transaction errors. For the read duration we suppress
1630 * assertion of machine check exceptions to avoid those.
1631 */
1632 gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1633 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1634
1635 /* Make sure no CRS is recorded */
1636 out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);
1637
1638 switch (len) {
1639 case 1:
1640 *val = in_8((u8 *)(addr + offset));
1641 break;
1642 case 2:
1643 *val = in_le16((u16 *)(addr + offset));
1644 break;
1645 default:
1646 *val = in_le32((u32 *)(addr + offset));
1647 break;
1648 }
1649
1650 pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
1651 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1652 bus->number, hose->first_busno, hose->last_busno,
1653 devfn, offset, len, addr + offset, *val);
1654
1655 /* Check for CRS (440SPe rev B does that for us but heh ..) */
1656 if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
1657 pr_debug("Got CRS !\n");
1658 if (len != 4 || offset != 0)
1659 return PCIBIOS_DEVICE_NOT_FOUND;
1660 *val = 0xffff0001;
1661 }
1662
1663 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1664
1665 return PCIBIOS_SUCCESSFUL;
1666}
1667
1668static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
1669 int offset, int len, u32 val)
1670{
1671 struct pci_controller *hose = pci_bus_to_host(bus);
1672 struct ppc4xx_pciex_port *port =
1673 &ppc4xx_pciex_ports[hose->indirect_type];
1674 void __iomem *addr;
1675 u32 gpl_cfg;
1676
1677 if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1678 return PCIBIOS_DEVICE_NOT_FOUND;
1679
1680 addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1681
1682 /*
1683 * Reading from configuration space of non-existing device can
1684 * generate transaction errors. For the read duration we suppress
1685 * assertion of machine check exceptions to avoid those.
1686 */
1687 gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1688 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1689
1690 pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
1691 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1692 bus->number, hose->first_busno, hose->last_busno,
1693 devfn, offset, len, addr + offset, val);
1694
1695 switch (len) {
1696 case 1:
1697 out_8((u8 *)(addr + offset), val);
1698 break;
1699 case 2:
1700 out_le16((u16 *)(addr + offset), val);
1701 break;
1702 default:
1703 out_le32((u32 *)(addr + offset), val);
1704 break;
1705 }
1706
1707 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1708
1709 return PCIBIOS_SUCCESSFUL;
1710}
1711
1712static struct pci_ops ppc4xx_pciex_pci_ops =
1713{
1714 .read = ppc4xx_pciex_read_config,
1715 .write = ppc4xx_pciex_write_config,
1716};
1717
1718static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
1719 struct pci_controller *hose,
1720 void __iomem *mbase,
1721 u64 plb_addr,
1722 u64 pci_addr,
1723 u64 size,
1724 unsigned int flags,
1725 int index)
1726{
1727 u32 lah, lal, pciah, pcial, sa;
1728
1729 if (!is_power_of_2(size) ||
1730 (index < 2 && size < 0x100000) ||
1731 (index == 2 && size < 0x100) ||
1732 (plb_addr & (size - 1)) != 0) {
1733 printk(KERN_WARNING "%s: Resource out of range\n",
1734 hose->dn->full_name);
1735 return -1;
1736 }
1737
1738 /* Calculate register values */
1739 lah = RES_TO_U32_HIGH(plb_addr);
1740 lal = RES_TO_U32_LOW(plb_addr);
1741 pciah = RES_TO_U32_HIGH(pci_addr);
1742 pcial = RES_TO_U32_LOW(pci_addr);
1743 sa = (0xffffffffu << ilog2(size)) | 0x1;
1744
1745 /* Program register values */
1746 switch (index) {
1747 case 0:
1748 out_le32(mbase + PECFG_POM0LAH, pciah);
1749 out_le32(mbase + PECFG_POM0LAL, pcial);
1750 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
1751 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
1752 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
1753 /*Enabled and single region */
1754 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
1755 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1756 sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
1757 | DCRO_PEGPL_OMRxMSKL_VAL);
1758 else if (of_device_is_compatible(
1759 port->node, "ibm,plb-pciex-476fpe") ||
1760 of_device_is_compatible(
1761 port->node, "ibm,plb-pciex-476gtr"))
1762 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1763 sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT
1764 | DCRO_PEGPL_OMRxMSKL_VAL);
1765 else
1766 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1767 sa | DCRO_PEGPL_OMR1MSKL_UOT
1768 | DCRO_PEGPL_OMRxMSKL_VAL);
1769 break;
1770 case 1:
1771 out_le32(mbase + PECFG_POM1LAH, pciah);
1772 out_le32(mbase + PECFG_POM1LAL, pcial);
1773 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
1774 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
1775 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
1776 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL,
1777 sa | DCRO_PEGPL_OMRxMSKL_VAL);
1778 break;
1779 case 2:
1780 out_le32(mbase + PECFG_POM2LAH, pciah);
1781 out_le32(mbase + PECFG_POM2LAL, pcial);
1782 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
1783 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
1784 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
1785 /* Note that 3 here means enabled | IO space !!! */
1786 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL,
1787 sa | DCRO_PEGPL_OMR3MSKL_IO
1788 | DCRO_PEGPL_OMRxMSKL_VAL);
1789 break;
1790 }
1791
1792 return 0;
1793}
1794
1795static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
1796 struct pci_controller *hose,
1797 void __iomem *mbase)
1798{
84d727a1 1799 int i, j, found_isa_hole = 0;
1800
1801 /* Setup outbound memory windows */
1802 for (i = j = 0; i < 3; i++) {
1803 struct resource *res = &hose->mem_resources[i];
3fd47f06 1804 resource_size_t offset = hose->mem_offset[i];
1805
1806 /* we only care about memory windows */
1807 if (!(res->flags & IORESOURCE_MEM))
1808 continue;
1809 if (j > 1) {
1810 printk(KERN_WARNING "%s: Too many ranges\n",
1811 port->node->full_name);
1812 break;
1813 }
1814
1815 /* Configure the resource */
1816 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1817 res->start,
3fd47f06 1818 res->start - offset,
28f65c11 1819 resource_size(res),
1820 res->flags,
1821 j) == 0) {
1822 j++;
1823
1824 /* If the resource PCI address is 0 then we have our
1825 * ISA memory hole
1826 */
3fd47f06 1827 if (res->start == offset)
84d727a1 1828 found_isa_hole = 1;
a2d2e1ec 1829 }
1830 }
1831
1832 /* Handle ISA memory hole if not already covered */
1833 if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
1834 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1835 hose->isa_mem_phys, 0,
1836 hose->isa_mem_size, 0, j) == 0)
1837 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
1838 hose->dn->full_name);
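/* The "ISA memory hole" set up here is an outbound window mapping
 * hose->isa_mem_phys on the PLB side to PCI address 0, so that legacy
 * code expecting ISA memory at the bottom of PCI memory space keeps
 * working.
 */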
1839
1840 /* Configure IO, always 64K starting at 0. We hard wire it to 64K !
1841 * Note also that it -has- to be region index 2 on this HW
1842 */
1843 if (hose->io_resource.flags & IORESOURCE_IO)
1844 ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1845 hose->io_base_phys, 0,
1846 0x10000, IORESOURCE_IO, 2);
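/* Index 2 corresponds to OMR3 in ppc4xx_setup_one_pciex_POM() above,
 * which is the only outbound region whose mask word gets
 * DCRO_PEGPL_OMR3MSKL_IO set; that appears to be why the I/O window
 * has to live in this region.
 */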
1847}
1848
1849static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
1850 struct pci_controller *hose,
1851 void __iomem *mbase,
1852 struct resource *res)
1853{
28f65c11 1854 resource_size_t size = resource_size(res);
1855 u64 sa;
1856
1857 if (port->endpoint) {
1858 resource_size_t ep_addr = 0;
1859 resource_size_t ep_size = 32 << 20;
1860
1861 /* Currently we map a fixed 32 MB window (ep_size below) to PLB
1862 * address 0 (SDRAM). This should probably be configurable via a dts
1863 * property.
1864 */
1865
1866 /* Calculate window size */
d258e64e 1867 sa = (0xffffffffffffffffull << ilog2(ep_size));
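/* With the 32 MB ep_size above, ilog2(ep_size) = 25, so
 * sa = 0xfffffffffe000000: a 64-bit mask covering a 32 MB BAR.
 * PCI_BASE_ADDRESS_MEM_TYPE_64 is OR'd into the low word when it is
 * written to PECFG_BAR0LMPA below.
 */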
1868
1869 /* Setup BAR0 */
1870 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1871 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
1872 PCI_BASE_ADDRESS_MEM_TYPE_64);
a2d2e1ec 1873
1874 /* Disable BAR1 & BAR2 */
1875 out_le32(mbase + PECFG_BAR1MPA, 0);
1876 out_le32(mbase + PECFG_BAR2HMPA, 0);
1877 out_le32(mbase + PECFG_BAR2LMPA, 0);
a2d2e1ec 1878
1879 out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
1880 out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
1881
1882 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
1883 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
1884 } else {
1885 /* Calculate window size */
d258e64e 1886 sa = (0xffffffffffffffffull << ilog2(size));
80daac3f 1887 if (res->flags & IORESOURCE_PREFETCH)
9fb55296 1888 sa |= PCI_BASE_ADDRESS_MEM_PREFETCH;
80daac3f 1889
df777bd3 1890 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
1891 of_device_is_compatible(
1892 port->node, "ibm,plb-pciex-476fpe") ||
1893 of_device_is_compatible(
1894 port->node, "ibm,plb-pciex-476gtr"))
1895 sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;
1896
1897 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1898 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
1899
1900 /* The setup of the split looks weird to me ... let's see
1901 * if it works
1902 */
1903 out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
1904 out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
1905 out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
1906 out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
1907 out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
1908 out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
1909
1910 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
1911 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
1912 }
1913
1914 /* Enable inbound mapping */
1915 out_le32(mbase + PECFG_PIMEN, 0x1);
1916
1917 /* Enable I/O, Mem, and Busmaster cycles */
1918 out_le16(mbase + PCI_COMMAND,
1919 in_le16(mbase + PCI_COMMAND) |
1920 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1921}
1922
1923static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
1924{
1925 struct resource dma_window;
1926 struct pci_controller *hose = NULL;
1927 const int *bus_range;
1928 int primary = 0, busses;
1929 void __iomem *mbase = NULL, *cfg_data = NULL;
1930 const u32 *pval;
1931 u32 val;
1932
1933 /* Check if primary bridge */
1934 if (of_get_property(port->node, "primary", NULL))
1935 primary = 1;
1936
1937 /* Get bus range if any */
1938 bus_range = of_get_property(port->node, "bus-range", NULL);
1939
1940 /* Allocate the host controller data structure */
1941 hose = pcibios_alloc_controller(port->node);
1942 if (!hose)
1943 goto fail;
1944
1945 /* We stick the port number in "indirect_type" so the config space
1946 * ops can retrieve the port data structure easily
1947 */
1948 hose->indirect_type = port->index;
1949
1950 /* Get bus range */
1951 hose->first_busno = bus_range ? bus_range[0] : 0x0;
1952 hose->last_busno = bus_range ? bus_range[1] : 0xff;
1953
1954 /* Because the config space mapping is large (1M per bus), we limit
1955 * how many busses we support. In the long run, we could replace this
1956 * with something akin to kmap_atomic instead. We also set aside 1 bus
1957 * for the host bridge itself.
1958 */
1959 busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
1960 if (busses > MAX_PCIE_BUS_MAPPED) {
1961 busses = MAX_PCIE_BUS_MAPPED;
1962 hose->last_busno = hose->first_busno + busses;
1963 }
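/* If the device-tree bus-range is wider than MAX_PCIE_BUS_MAPPED,
 * the clamp above simply shrinks it: e.g. a bus-range of <0x00 0xff>
 * is cut down so that only the first MAX_PCIE_BUS_MAPPED buses behind
 * the port get config space mapped (and are therefore reachable)
 * through this window.
 */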
1964
1965 if (!port->endpoint) {
1966 /* Only map the external config space in cfg_data for
1967 * PCIe root-complexes. External space is 1M per bus
1968 */
1969 cfg_data = ioremap(port->cfg_space.start +
1970 (hose->first_busno + 1) * 0x100000,
1971 busses * 0x100000);
1972 if (cfg_data == NULL) {
1973 printk(KERN_ERR "%s: Can't map external config space !",
1974 port->node->full_name);
1975 goto fail;
1976 }
1977 hose->cfg_data = cfg_data;
1978 }
1979
1980 /* Always map the host config space in cfg_addr.
1981 * Internal space is 4K
a2d2e1ec 1982 */
a2d2e1ec 1983 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1984 if (mbase == NULL) {
1985 printk(KERN_ERR "%s: Can't map internal config space !",
1986 port->node->full_name);
1987 goto fail;
1988 }
1989 hose->cfg_addr = mbase;
1990
1991 pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
1992 hose->first_busno, hose->last_busno);
1993 pr_debug(" config space mapped at: root @0x%p, other @0x%p\n",
1994 hose->cfg_addr, hose->cfg_data);
1995
1996 /* Setup config space */
1997 hose->ops = &ppc4xx_pciex_pci_ops;
1998 port->hose = hose;
1999 mbase = (void __iomem *)hose->cfg_addr;
2000
2001 if (!port->endpoint) {
2002 /*
2003 * Set bus numbers on our root port
2004 */
2005 out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
2006 out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
2007 out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
2008 }
2009
2010 /*
2011 * OMRs are already reset, also disable PIMs
2012 */
2013 out_le32(mbase + PECFG_PIMEN, 0);
2014
2015 /* Parse outbound mapping resources */
2016 pci_process_bridge_OF_ranges(hose, port->node, primary);
2017
2018 /* Parse inbound mapping resources */
2019 if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
2020 goto fail;
2021
2022 /* Configure outbound ranges POMs */
2023 ppc4xx_configure_pciex_POMs(port, hose, mbase);
2024
2025 /* Configure inbound ranges PIMs */
2026 ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
2027
2028 /* The root complex doesn't show up if we don't set some vendor
2029 * and device IDs into it. The defaults below are the same bogus
2030 * ones that the initial code in arch/ppc had. This can be
2031 * overridden by setting the "vendor-id" / "device-id" properties
2032 * in the pciex node.
a2d2e1ec 2033 */
a2d2e1ec 2034
2035 /* Get the (optional) vendor-/device-id from the device-tree */
2036 pval = of_get_property(port->node, "vendor-id", NULL);
2037 if (pval) {
2038 val = *pval;
2039 } else {
2040 if (!port->endpoint)
2041 val = 0xaaa0 + port->index;
2042 else
2043 val = 0xeee0 + port->index;
2044 }
2045 out_le16(mbase + 0x200, val);
2046
2047 pval = of_get_property(port->node, "device-id", NULL);
2048 if (pval) {
2049 val = *pval;
2050 } else {
2051 if (!port->endpoint)
2052 val = 0xbed0 + port->index;
2053 else
2054 val = 0xfed0 + port->index;
2055 }
2056 out_le16(mbase + 0x202, val);
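/* The 0x200/0x202/0x204/0x208 offsets used here and below appear to
 * be writable shadows of the port's own vendor ID, device ID, command
 * and class/revision config fields (an inference from how this code
 * uses them, not a documented fact).
 */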
2057
2058 /* Enable Bus master, memory, and io space */
2059 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
2060 out_le16(mbase + 0x204, 0x7);
2061
2062 if (!port->endpoint) {
2063 /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
2064 out_le32(mbase + 0x208, 0x06040001);
2065
2066 printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
2067 port->index);
2068 } else {
2069 /* Set Class Code to Processor/PPC */
2070 out_le32(mbase + 0x208, 0x0b200001);
2071
2072 printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
2073 port->index);
2074 }
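/* Decoding the class/revision values written above: 0x06040001 is
 * class 0x0604 (PCI-to-PCI bridge), prog-if 0x00, revision 0x01, and
 * 0x0b200001 is class 0x0b20 (PowerPC processor), revision 0x01.
 */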
a2d2e1ec 2075
2076 return;
2077 fail:
2078 if (hose)
2079 pcibios_free_controller(hose);
2080 if (cfg_data)
2081 iounmap(cfg_data);
2082 if (mbase)
2083 iounmap(mbase);
2084}
2085
2086static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
2087{
2088 struct ppc4xx_pciex_port *port;
2089 const u32 *pval;
2090 int portno;
2091 unsigned int dcrs;
80daac3f 2092 const char *val;
2093
2094 /* First, proceed to core initialization as we assume there's
2095 * only one PCIe core in the system
2096 */
2097 if (ppc4xx_pciex_check_core_init(np))
2098 return;
2099
2100 /* Get the port number from the device-tree */
2101 pval = of_get_property(np, "port", NULL);
2102 if (pval == NULL) {
2103 printk(KERN_ERR "PCIE: Can't find port number for %s\n",
2104 np->full_name);
2105 return;
2106 }
2107 portno = *pval;
2108 if (portno >= ppc4xx_pciex_port_count) {
2109 printk(KERN_ERR "PCIE: port number out of range for %s\n",
2110 np->full_name);
2111 return;
2112 }
2113 port = &ppc4xx_pciex_ports[portno];
2114 port->index = portno;
2115
2116 /*
2117 * Check if device is enabled
2118 */
2119 if (!of_device_is_available(np)) {
2120 printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
2121 return;
2122 }
2123
a2d2e1ec 2124 port->node = of_node_get(np);
2125 if (ppc4xx_pciex_hwops->want_sdr) {
2126 pval = of_get_property(np, "sdr-base", NULL);
2127 if (pval == NULL) {
2128 printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
2129 np->full_name);
2130 return;
2131 }
2132 port->sdr_base = *pval;
a2d2e1ec 2133 }
a2d2e1ec 2134
2135 /* Check if device_type property is set to "pci" or "pci-endpoint".
2136 * Depending on this property, the PCIe port is configured either
2137 * as a root-complex or as an endpoint.
2138 */
2139 val = of_get_property(port->node, "device_type", NULL);
2140 if (val && !strcmp(val, "pci-endpoint")) {
2141 port->endpoint = 1;
2142 } else if (val && !strcmp(val, "pci")) {
2143 port->endpoint = 0;
2144 } else {
2145 printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n",
2146 np->full_name);
2147 return;
2148 }
035ee428 2149
2150 /* Fetch config space registers address */
2151 if (of_address_to_resource(np, 0, &port->cfg_space)) {
2152 printk(KERN_ERR "%s: Can't get PCI-E config space !",
2153 np->full_name);
2154 return;
2155 }
2156 /* Fetch host bridge internal registers address */
2157 if (of_address_to_resource(np, 1, &port->utl_regs)) {
2158 printk(KERN_ERR "%s: Can't get UTL register base !",
2159 np->full_name);
2160 return;
2161 }
2162
2163 /* Map DCRs */
2164 dcrs = dcr_resource_start(np, 0);
2165 if (dcrs == 0) {
2166 printk(KERN_ERR "%s: Can't get DCR register base !",
2167 np->full_name);
2168 return;
2169 }
2170 port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
2171
2172 /* Initialize the port specific registers */
2173 if (ppc4xx_pciex_port_init(port)) {
2174 printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
a2d2e1ec 2175 return;
035ee428 2176 }
2177
2178 /* Setup the linux hose data structure */
2179 ppc4xx_pciex_port_setup_hose(port);
2180}
2181
2182#endif /* CONFIG_PPC4xx_PCI_EXPRESS */
2183
2184static int __init ppc4xx_pci_find_bridges(void)
2185{
2186 struct device_node *np;
2187
0e47ff1c 2188 pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
41b6a085 2189
a2d2e1ec 2190#ifdef CONFIG_PPC4xx_PCI_EXPRESS
2191 for_each_compatible_node(np, NULL, "ibm,plb-pciex")
2192 ppc4xx_probe_pciex_bridge(np);
a2d2e1ec 2193#endif
2194 for_each_compatible_node(np, NULL, "ibm,plb-pcix")
2195 ppc4xx_probe_pcix_bridge(np);
2196 for_each_compatible_node(np, NULL, "ibm,plb-pci")
2197 ppc4xx_probe_pci_bridge(np);
2198
2199 return 0;
2200}
2201arch_initcall(ppc4xx_pci_find_bridges);
2202