[SPARC64]: Consolidate {sbus,pci}_iommu_arena.
arch/sparc64/kernel/sbus.c
/* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
 * sbus.c: UltraSparc SBUS controller support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>

#include <asm/page.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/upa.h>
#include <asm/cache.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/starfire.h>

#include "iommu_common.h"

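/* Base bus address of the IOMMU's DVMA translation window.  Every
 * DMA address handed out below is MAP_BASE plus the allocated IOTSB
 * entry index shifted up by IO_PAGE_SHIFT.
 */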
#define MAP_BASE        ((u32)0xc0000000)

struct sbus_iommu {
        spinlock_t              lock;

        struct iommu_arena      arena;

        iopte_t                 *page_table;
        unsigned long           strbuf_regs;
        unsigned long           iommu_regs;
        unsigned long           sbus_control_reg;

        volatile unsigned long  strbuf_flushflag;
};

/* Offsets from iommu_regs */
#define SYSIO_IOMMUREG_BASE     0x2400UL
#define IOMMU_CONTROL   (0x2400UL - 0x2400UL)   /* IOMMU control register */
#define IOMMU_TSBBASE   (0x2408UL - 0x2400UL)   /* TSB base address register */
#define IOMMU_FLUSH     (0x2410UL - 0x2400UL)   /* IOMMU flush register */
#define IOMMU_VADIAG    (0x4400UL - 0x2400UL)   /* SBUS virtual address diagnostic */
#define IOMMU_TAGCMP    (0x4408UL - 0x2400UL)   /* TLB tag compare diagnostics */
#define IOMMU_LRUDIAG   (0x4500UL - 0x2400UL)   /* IOMMU LRU queue diagnostics */
#define IOMMU_TAGDIAG   (0x4580UL - 0x2400UL)   /* TLB tag diagnostics */
#define IOMMU_DRAMDIAG  (0x4600UL - 0x2400UL)   /* TLB data RAM diagnostics */

#define IOMMU_DRAM_VALID        (1UL << 30UL)

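/* Invalidate the entire IOMMU TLB by zeroing all sixteen tag
 * diagnostic registers, then read the SBUS control register once to
 * make sure the writes have completed.
 */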
static void __iommu_flushall(struct sbus_iommu *iommu)
{
        unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
        int entry;

        for (entry = 0; entry < 16; entry++) {
                upa_writeq(0, tag);
                tag += 8UL;
        }
        upa_readq(iommu->sbus_control_reg);
}

/* Offsets from strbuf_regs */
#define SYSIO_STRBUFREG_BASE    0x2800UL
#define STRBUF_CONTROL  (0x2800UL - 0x2800UL)   /* Control */
#define STRBUF_PFLUSH   (0x2808UL - 0x2800UL)   /* Page flush/invalidate */
#define STRBUF_FSYNC    (0x2810UL - 0x2800UL)   /* Flush synchronization */
#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL)   /* data RAM diagnostic */
#define STRBUF_ERRDIAG  (0x5400UL - 0x2800UL)   /* error status diagnostics */
#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL)   /* Page tag diagnostics */
#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL)   /* Line tag diagnostics */

#define STRBUF_TAG_VALID        0x02UL

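/* Flush [base, base + (npages << IO_PAGE_SHIFT)) out of the streaming
 * buffer.  Each page address written to STRBUF_PFLUSH invalidates the
 * matching entries; writing the physical address of strbuf_flushflag
 * to STRBUF_FSYNC then makes the hardware store a non-zero value
 * there once every prior flush has drained, which the loop below
 * polls for.
 */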
static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
{
        unsigned long n;
        int limit;

        n = npages;
        while (n--)
                upa_writeq(base + (n << IO_PAGE_SHIFT),
                           iommu->strbuf_regs + STRBUF_PFLUSH);

        /* If the device could not have possibly put dirty data into
         * the streaming cache, no flush-flag synchronization needs
         * to be performed.
         */
        if (direction == SBUS_DMA_TODEVICE)
                return;

        iommu->strbuf_flushflag = 0UL;

        /* Whoopee cushion! */
        upa_writeq(__pa(&iommu->strbuf_flushflag),
                   iommu->strbuf_regs + STRBUF_FSYNC);
        upa_readq(iommu->sbus_control_reg);

        limit = 100000;
        while (iommu->strbuf_flushflag == 0UL) {
                limit--;
                if (!limit)
                        break;
                udelay(1);
                rmb();
        }
        if (!limit)
                printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
                       "vaddr[%08x] npages[%ld]\n",
                       base, npages);
}

/* Based largely upon the ppc64 iommu allocator. */
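/* The arena is a bitmap of IOTSB entries plus a rotating allocation
 * hint.  A search that runs off the end of the map wraps back to
 * zero exactly once per allocation; the wrap is also where we flush
 * the IOMMU TLB, so entries about to be recycled cannot be held
 * stale in hardware.
 */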
static long sbus_arena_alloc(struct sbus_iommu *iommu, unsigned long npages)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long n, i, start, end, limit;
        int pass;

        limit = arena->limit;
        start = arena->hint;
        pass = 0;

again:
        n = find_next_zero_bit(arena->map, limit, start);
        end = n + npages;
        if (unlikely(end >= limit)) {
                if (likely(pass < 1)) {
                        limit = start;
                        start = 0;
                        __iommu_flushall(iommu);
                        pass++;
                        goto again;
                } else {
                        /* Scanned the whole thing, give up. */
                        return -1;
                }
        }

        for (i = n; i < end; i++) {
                if (test_bit(i, arena->map)) {
                        start = i + 1;
                        goto again;
                }
        }

        for (i = n; i < end; i++)
                __set_bit(i, arena->map);

        arena->hint = end;

        return n;
}

static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
        unsigned long i;

        for (i = base; i < (base + npages); i++)
                __clear_bit(i, arena->map);
}

static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize)
{
        unsigned long tsbbase, order, sz, num_tsb_entries;

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);

        /* Allocate and initialize the free area map. */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
181 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
                prom_halt();
        }
        iommu->arena.limit = num_tsb_entries;

        /* Now allocate and setup the IOMMU page table itself. */
        order = get_order(tsbsize);
        tsbbase = __get_free_pages(GFP_KERNEL, order);
        if (!tsbbase) {
                prom_printf("IOMMU: Error, gfp(tsb) failed.\n");
                prom_halt();
        }
        iommu->page_table = (iopte_t *)tsbbase;
        memset(iommu->page_table, 0, tsbsize);
}

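/* Helpers translating between arena entries and IOPTEs/bus offsets:
 * alloc_npages() grabs a run of 'npages' consecutive IOTSB entries
 * and returns a pointer to the first IOPTE (NULL when the arena is
 * exhausted), free_npages() releases the run backing a bus offset.
 * Callers hold iommu->lock around both.
 */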
static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npages)
{
        long entry;

        entry = sbus_arena_alloc(iommu, npages);
        if (unlikely(entry < 0))
                return NULL;

        return iommu->page_table + entry;
}

static inline void free_npages(struct sbus_iommu *iommu, dma_addr_t base, unsigned long npages)
{
        sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}

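/* Allocate a physically contiguous, consistent (non-streaming) DMA
 * buffer and map it into the IOMMU.  A hypothetical driver would
 * pair the two calls roughly like this:
 *
 *      dma_addr_t dvma;
 *      void *buf = sbus_alloc_consistent(sdev, len, &dvma);
 *      if (buf) {
 *              ... program the device with 'dvma' ...
 *              sbus_free_consistent(sdev, len, buf, dvma);
 *      }
 */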
void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
{
        struct sbus_iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, first_page;
        void *ret;
        int npages;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;

        first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = sdev->bus->iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(iopte == NULL)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dvma_addr = (MAP_BASE +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_VALID | IOPTE_CACHE |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += IO_PAGE_SIZE;
        }

        return ret;
}

void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
{
        struct sbus_iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, npages;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = sdev->bus->iommu;
        iopte = iommu->page_table +
                ((dvma - MAP_BASE) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        free_npages(iommu, dvma - MAP_BASE, npages);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

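/* Map a single CPU buffer for streaming DMA.  The page count covers
 * all of [ptr, ptr + sz) rounded out to IO_PAGE_SIZE boundaries, so
 * a buffer smaller than one IO page still needs two IOTSB entries
 * when it straddles a page boundary.  The returned bus address keeps
 * the intra-page offset of 'ptr'.
 */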
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
{
        struct sbus_iommu *iommu;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        iommu = sdev->bus->iommu;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        base = alloc_npages(iommu, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(!base))
                BUG();

        bus_addr = (MAP_BASE +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);

        iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
        if (direction != SBUS_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        return ret;
}

void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        iopte_t *base;
        unsigned long flags, npages, i;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);

        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        for (i = 0; i < npages; i++)
                iopte_val(base[i]) = 0UL;
        free_npages(iommu, bus_addr - MAP_BASE, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG) \
        (__pa(page_address((SG)->page)) + (SG)->offset)

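/* Write the IOPTEs backing a coalesced scatterlist.  This assumes
 * prepare_sg() has already stored the DMA segment geometry in the
 * first 'nused' entries' dma_address/dma_length fields; we then walk
 * the original 'nelems' entries in parallel, merging runs that stay
 * physically contiguous within an IO page, and emit one IOPTE per IO
 * page of every DMA segment.
 */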
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                           int nused, int nelems, unsigned long iopte_protection)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg + nelems;
        int i;

        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = iopte_protection | (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                *iopte++ = __iopte(pteval);
                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (sg < sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }
}

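/* Map a scatterlist for streaming DMA.  Single-entry lists fall back
 * to sbus_map_single(); longer lists are coalesced by prepare_sg(),
 * backed by one contiguous run of IOTSB entries starting at dma_base,
 * and the resulting number of DMA segments ('used') is returned.
 */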
int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct sbus_iommu *iommu;
        unsigned long flags, npages, iopte_protection;
        iopte_t *base;
        u32 dma_base;
        struct scatterlist *sgtmp;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        sbus_map_single(sdev,
                                        (page_address(sglist->page) + sglist->offset),
                                        sglist->length, direction);
                sglist->dma_length = sglist->length;
                return 1;
        }

        iommu = sdev->bus->iommu;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        npages = prepare_sg(sglist, nelems);

        spin_lock_irqsave(&iommu->lock, flags);
        base = alloc_npages(iommu, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(base == NULL))
                BUG();

        dma_base = MAP_BASE +
                ((base - iommu->page_table) << IO_PAGE_SHIFT);

        /* Normalize DVMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nelems - used;

        iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
        if (direction != SBUS_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
        verify_sglist(sglist, nelems, base, npages);
#endif

        return used;
}

void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct sbus_iommu *iommu;
        iopte_t *base;
        unsigned long flags, i, npages;
        u32 bus_addr;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        iommu = sdev->bus->iommu;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;

        for (i = 1; i < nelems; i++)
                if (sglist[i].dma_length == 0)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;

        base = iommu->page_table +
                ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        for (i = 0; i < npages; i++)
                iopte_val(base[i]) = 0UL;
        free_npages(iommu, bus_addr - MAP_BASE, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct sbus_iommu *iommu;
        unsigned long flags, npages;

        iommu = sdev->bus->iommu;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
}

void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct sbus_iommu *iommu;
        unsigned long flags, npages, i;
        u32 bus_addr;

        iommu = sdev->bus->iommu;

        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
        for (i = 0; i < nelems; i++) {
                if (!sglist[i].dma_length)
                        break;
        }
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
}

/* Enable 64-bit DVMA mode for the given device. */
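/* Each SBUS slot has its own configuration register at a fixed
 * offset from the SBUS control register; the switch below selects
 * it.  Bit 14 turns on extended (64-bit) transfers, and bits 1-4
 * advertise the supported burst sizes.
 */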
void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        int slot = sdev->slot;
        unsigned long cfg_reg;
        u64 val;

        cfg_reg = iommu->sbus_control_reg;
        switch (slot) {
        case 0:
                cfg_reg += 0x20UL;
                break;
        case 1:
                cfg_reg += 0x28UL;
                break;
        case 2:
                cfg_reg += 0x30UL;
                break;
        case 3:
                cfg_reg += 0x38UL;
                break;
        case 13:
                cfg_reg += 0x40UL;
                break;
        case 14:
                cfg_reg += 0x48UL;
                break;
        case 15:
                cfg_reg += 0x50UL;
                break;

        default:
                return;
        }

        val = upa_readq(cfg_reg);
        if (val & (1UL << 14UL)) {
                /* Extended transfer mode already enabled. */
                return;
        }

        val |= (1UL << 14UL);

        if (bursts & DMA_BURST8)
                val |= (1UL << 1UL);
        if (bursts & DMA_BURST16)
                val |= (1UL << 2UL);
        if (bursts & DMA_BURST32)
                val |= (1UL << 3UL);
        if (bursts & DMA_BURST64)
                val |= (1UL << 4UL);
        upa_writeq(val, cfg_reg);
}

/* INO number to IMAP register offset for SYSIO external IRQ's.
 * This should conform to both Sunfire/Wildfire server and Fusion
 * desktop designs.
 */
#define SYSIO_IMAP_SLOT0        0x2c04UL
#define SYSIO_IMAP_SLOT1        0x2c0cUL
#define SYSIO_IMAP_SLOT2        0x2c14UL
#define SYSIO_IMAP_SLOT3        0x2c1cUL
#define SYSIO_IMAP_SCSI         0x3004UL
#define SYSIO_IMAP_ETH          0x300cUL
#define SYSIO_IMAP_BPP          0x3014UL
#define SYSIO_IMAP_AUDIO        0x301cUL
#define SYSIO_IMAP_PFAIL        0x3024UL
#define SYSIO_IMAP_KMS          0x302cUL
#define SYSIO_IMAP_FLPY         0x3034UL
#define SYSIO_IMAP_SHW          0x303cUL
#define SYSIO_IMAP_KBD          0x3044UL
#define SYSIO_IMAP_MS           0x304cUL
#define SYSIO_IMAP_SER          0x3054UL
#define SYSIO_IMAP_TIM0         0x3064UL
#define SYSIO_IMAP_TIM1         0x306cUL
#define SYSIO_IMAP_UE           0x3074UL
#define SYSIO_IMAP_CE           0x307cUL
#define SYSIO_IMAP_SBERR        0x3084UL
#define SYSIO_IMAP_PMGMT        0x308cUL
#define SYSIO_IMAP_GFX          0x3094UL
#define SYSIO_IMAP_EUPA         0x309cUL

#define bogon     ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
        /* SBUS Slot 0 --> 3, level 1 --> 7 */
        SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
        SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
        SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
        SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
        SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
        SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
        SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
        SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

        /* Onboard devices (not relevant/used on SunFire). */
        SYSIO_IMAP_SCSI,
        SYSIO_IMAP_ETH,
        SYSIO_IMAP_BPP,
        bogon,
        SYSIO_IMAP_AUDIO,
        SYSIO_IMAP_PFAIL,
        bogon,
        bogon,
        SYSIO_IMAP_KMS,
        SYSIO_IMAP_FLPY,
        SYSIO_IMAP_SHW,
        SYSIO_IMAP_KBD,
        SYSIO_IMAP_MS,
        SYSIO_IMAP_SER,
        bogon,
        bogon,
        SYSIO_IMAP_TIM0,
        SYSIO_IMAP_TIM1,
        bogon,
        bogon,
        SYSIO_IMAP_UE,
        SYSIO_IMAP_CE,
        SYSIO_IMAP_SBERR,
        SYSIO_IMAP_PMGMT,
};

#undef bogon

#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)

/* Convert Interrupt Mapping register pointer to associated
 * Interrupt Clear register pointer, SYSIO specific version.
 */
#define SYSIO_ICLR_UNUSED0      0x3400UL
#define SYSIO_ICLR_SLOT0        0x340cUL
#define SYSIO_ICLR_SLOT1        0x344cUL
#define SYSIO_ICLR_SLOT2        0x348cUL
#define SYSIO_ICLR_SLOT3        0x34ccUL
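/* Every IMAP register has its ICLR partner at a fixed distance, so
 * the conversion below is a single constant offset derived from the
 * SLOT0 pair.
 */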
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
        unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
        return imap + diff;
}

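/* Translate a SYSIO INO into a fully cooked IRQ.  INOs below 0x20
 * are external SBUS slot interrupts: bits 3-4 select the slot and
 * bits 0-2 the SBUS level, which together pick the ICLR register.
 * Higher INOs are onboard devices with a one-to-one IMAP/ICLR
 * pairing.
 */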
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
        struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long imap, iclr;
        int sbus_level = 0;

        imap = sysio_irq_offsets[ino];
        if (imap == ((unsigned long)-1)) {
708 prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
709 ino);
                prom_halt();
        }
        imap += reg_base;

        /* SYSIO inconsistency.  For external SLOTS, we have to select
         * the right ICLR register based upon the lower SBUS irq level
         * bits.
         */
        if (ino >= 0x20) {
                iclr = sysio_imap_to_iclr(imap);
        } else {
                int sbus_slot = (ino & 0x18) >> 3;

                sbus_level = ino & 0x7;

                switch (sbus_slot) {
                case 0:
                        iclr = reg_base + SYSIO_ICLR_SLOT0;
                        break;
                case 1:
                        iclr = reg_base + SYSIO_ICLR_SLOT1;
                        break;
                case 2:
                        iclr = reg_base + SYSIO_ICLR_SLOT2;
                        break;
                default:
                case 3:
                        iclr = reg_base + SYSIO_ICLR_SLOT3;
                        break;
                }

                iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
        }
        return build_irq(sbus_level, iclr, imap);
}


/* Error interrupt handling. */
#define SYSIO_UE_AFSR   0x0030UL
#define SYSIO_UE_AFAR   0x0038UL
#define SYSIO_UEAFSR_PPIO       0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_UEAFSR_PDRD       0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_UEAFSR_PDWR       0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_UEAFSR_SPIO       0x1000000000000000UL /* Secondary PIO is cause */
#define SYSIO_UEAFSR_SDRD       0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_UEAFSR_SDWR       0x0400000000000000UL /* Secondary DVMA write cause */
#define SYSIO_UEAFSR_RESV1      0x03ff000000000000UL /* Reserved */
#define SYSIO_UEAFSR_DOFF       0x0000e00000000000UL /* Doubleword Offset */
#define SYSIO_UEAFSR_SIZE       0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_UEAFSR_MID        0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_UEAFSR_RESV2      0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long afsr_reg, afar_reg;
        unsigned long afsr, afar, error_bits;
        int reported;

        afsr_reg = reg_base + SYSIO_UE_AFSR;
        afar_reg = reg_base + SYSIO_UE_AFAR;

        /* Latch error status. */
        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
                 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
        upa_writeq(error_bits, afsr_reg);

        /* Log the error. */
        printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
               sbus->portid,
               (((error_bits & SYSIO_UEAFSR_PPIO) ?
                 "PIO" :
                 ((error_bits & SYSIO_UEAFSR_PDRD) ?
                  "DVMA Read" :
                  ((error_bits & SYSIO_UEAFSR_PDWR) ?
                   "DVMA Write" : "???")))));
        printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
               (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_UEAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
        printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_UEAFSR_SPIO) {
                reported++;
                printk("(PIO)");
        }
        if (afsr & SYSIO_UEAFSR_SDRD) {
                reported++;
                printk("(DVMA Read)");
        }
        if (afsr & SYSIO_UEAFSR_SDWR) {
                reported++;
                printk("(DVMA Write)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        return IRQ_HANDLED;
}

#define SYSIO_CE_AFSR   0x0040UL
#define SYSIO_CE_AFAR   0x0048UL
#define SYSIO_CEAFSR_PPIO       0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_CEAFSR_PDRD       0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_CEAFSR_PDWR       0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_CEAFSR_SPIO       0x1000000000000000UL /* Secondary PIO cause */
#define SYSIO_CEAFSR_SDRD       0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_CEAFSR_SDWR       0x0400000000000000UL /* Secondary DVMA write cause */
#define SYSIO_CEAFSR_RESV1      0x0300000000000000UL /* Reserved */
#define SYSIO_CEAFSR_ESYND      0x00ff000000000000UL /* Syndrome Bits */
#define SYSIO_CEAFSR_DOFF       0x0000e00000000000UL /* Double Offset */
#define SYSIO_CEAFSR_SIZE       0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_CEAFSR_MID        0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_CEAFSR_RESV2      0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long afsr_reg, afar_reg;
        unsigned long afsr, afar, error_bits;
        int reported;

        afsr_reg = reg_base + SYSIO_CE_AFSR;
        afar_reg = reg_base + SYSIO_CE_AFAR;

        /* Latch error status. */
        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
                 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
        upa_writeq(error_bits, afsr_reg);

        printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
               sbus->portid,
               (((error_bits & SYSIO_CEAFSR_PPIO) ?
                 "PIO" :
                 ((error_bits & SYSIO_CEAFSR_PDRD) ?
                  "DVMA Read" :
                  ((error_bits & SYSIO_CEAFSR_PDWR) ?
                   "DVMA Write" : "???")))));

        /* XXX Use syndrome and afar to print out module string just like
         * XXX UDB CE trap handler does... -DaveM
         */
        printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
               (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
               (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_CEAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

        printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_CEAFSR_SPIO) {
                reported++;
                printk("(PIO)");
        }
        if (afsr & SYSIO_CEAFSR_SDRD) {
                reported++;
                printk("(DVMA Read)");
        }
        if (afsr & SYSIO_CEAFSR_SDWR) {
                reported++;
                printk("(DVMA Write)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        return IRQ_HANDLED;
}

#define SYSIO_SBUS_AFSR         0x2010UL
#define SYSIO_SBUS_AFAR         0x2018UL
#define SYSIO_SBAFSR_PLE        0x8000000000000000UL /* Primary Late PIO Error */
#define SYSIO_SBAFSR_PTO        0x4000000000000000UL /* Primary SBUS Timeout */
#define SYSIO_SBAFSR_PBERR      0x2000000000000000UL /* Primary SBUS Error ACK */
#define SYSIO_SBAFSR_SLE        0x1000000000000000UL /* Secondary Late PIO Error */
#define SYSIO_SBAFSR_STO        0x0800000000000000UL /* Secondary SBUS Timeout */
#define SYSIO_SBAFSR_SBERR      0x0400000000000000UL /* Secondary SBUS Error ACK */
#define SYSIO_SBAFSR_RESV1      0x03ff000000000000UL /* Reserved */
#define SYSIO_SBAFSR_RD         0x0000800000000000UL /* Primary was late PIO read */
#define SYSIO_SBAFSR_RESV2      0x0000600000000000UL /* Reserved */
#define SYSIO_SBAFSR_SIZE       0x00001c0000000000UL /* Size of transfer */
#define SYSIO_SBAFSR_MID        0x000003e000000000UL /* MID causing the error */
#define SYSIO_SBAFSR_RESV3      0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long afsr_reg, afar_reg, reg_base;
        unsigned long afsr, afar, error_bits;
        int reported;

        reg_base = iommu->sbus_control_reg - 0x2000UL;
        afsr_reg = reg_base + SYSIO_SBUS_AFSR;
        afar_reg = reg_base + SYSIO_SBUS_AFAR;

        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
                 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
        upa_writeq(error_bits, afsr_reg);

        /* Log the error. */
        printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
               sbus->portid,
               (((error_bits & SYSIO_SBAFSR_PLE) ?
                 "Late PIO Error" :
                 ((error_bits & SYSIO_SBAFSR_PTO) ?
                  "Time Out" :
                  ((error_bits & SYSIO_SBAFSR_PBERR) ?
                   "Error Ack" : "???")))),
               (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
        printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_SBAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
        printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_SBAFSR_SLE) {
                reported++;
                printk("(Late PIO Error)");
        }
        if (afsr & SYSIO_SBAFSR_STO) {
                reported++;
                printk("(Time Out)");
        }
        if (afsr & SYSIO_SBAFSR_SBERR) {
                reported++;
                printk("(Error Ack)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        /* XXX check iommu/strbuf for further error status XXX */

        return IRQ_HANDLED;
}

#define ECC_CONTROL     0x0020UL
#define SYSIO_ECNTRL_ECCEN      0x8000000000000000UL /* Enable ECC Checking */
#define SYSIO_ECNTRL_UEEN       0x4000000000000000UL /* Enable UE Interrupts */
#define SYSIO_ECNTRL_CEEN       0x2000000000000000UL /* Enable CE Interrupts */

#define SYSIO_UE_INO            0x34
#define SYSIO_CE_INO            0x35
#define SYSIO_SBUSERR_INO       0x36

static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
{
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned int irq;
        u64 control;

        irq = sbus_build_irq(sbus, SYSIO_UE_INO);
        if (request_irq(irq, sysio_ue_handler,
                        IRQF_SHARED, "SYSIO UE", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        irq = sbus_build_irq(sbus, SYSIO_CE_INO);
        if (request_irq(irq, sysio_ce_handler,
                        IRQF_SHARED, "SYSIO CE", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
        if (request_irq(irq, sysio_sbus_error_handler,
                        IRQF_SHARED, "SYSIO SBUS Error", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        /* Now turn the error interrupts on and also enable ECC checking. */
        upa_writeq((SYSIO_ECNTRL_ECCEN |
                    SYSIO_ECNTRL_UEEN  |
                    SYSIO_ECNTRL_CEEN),
                   reg_base + ECC_CONTROL);

        control = upa_readq(iommu->sbus_control_reg);
        control |= 0x100UL; /* SBUS Error Interrupt Enable */
        upa_writeq(control, iommu->sbus_control_reg);
}

/* Boot time initialization. */
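/* Probe-time setup for one SYSIO: allocate and align the software
 * IOMMU state, build the IOTSB, program and enable the IOMMU and
 * streaming buffer, scrub both sets of diagnostic tags, enable DVMA
 * arbitration for all slots, and finally hook up the error
 * interrupts.
 */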
static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
{
        const struct linux_prom64_registers *pr;
        struct device_node *dp;
        struct sbus_iommu *iommu;
        unsigned long regs;
        u64 control;
        int i;

        dp = of_find_node_by_phandle(__node);

        sbus->portid = of_getintprop_default(dp, "upa-portid", -1);

        pr = of_get_property(dp, "reg", NULL);
        if (!pr) {
                prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
                prom_halt();
        }
        regs = pr->phys_addr;

        iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
        if (iommu == NULL) {
                prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
                prom_halt();
        }

        /* Align on E$ line boundary. */
        iommu = (struct sbus_iommu *)
                (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
                 ~(SMP_CACHE_BYTES - 1UL));

        memset(iommu, 0, sizeof(*iommu));

        /* Setup spinlock. */
        spin_lock_init(&iommu->lock);

        /* Init register offsets. */
        iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
        iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;

        /* The SYSIO SBUS control register is used for dummy reads
         * in order to ensure write completion.
         */
        iommu->sbus_control_reg = regs + 0x2000UL;

        /* Link into SYSIO software state. */
        sbus->iommu = iommu;

        printk("SYSIO: UPA portID %x, at %016lx\n",
               sbus->portid, regs);

        /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
        sbus_iommu_table_init(iommu, IO_TSB_SIZE);

        control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
        control = ((7UL << 16UL)        |
                   (0UL << 2UL)         |
                   (1UL << 1UL)         |
                   (1UL << 0UL));
        upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);

        /* Clean out any cruft in the IOMMU using
         * diagnostic accesses.
         */
        for (i = 0; i < 16; i++) {
                unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
                unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;

                dram += (unsigned long)i * 8UL;
                tag += (unsigned long)i * 8UL;
                upa_writeq(0, dram);
                upa_writeq(0, tag);
        }
        upa_readq(iommu->sbus_control_reg);

        /* Give the TSB to SYSIO. */
        upa_writeq(__pa(iommu->page_table), iommu->iommu_regs + IOMMU_TSBBASE);

        /* Setup streaming buffer, DE=1 SB_EN=1 */
        control = (1UL << 1UL) | (1UL << 0UL);
        upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);

        /* Clear out the tags using diagnostics. */
        for (i = 0; i < 16; i++) {
                unsigned long ptag, ltag;

                ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
                ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
                ptag += (unsigned long)i * 8UL;
                ltag += (unsigned long)i * 8UL;

                upa_writeq(0UL, ptag);
                upa_writeq(0UL, ltag);
        }

        /* Enable DVMA arbitration for all devices/slots. */
        control = upa_readq(iommu->sbus_control_reg);
        control |= 0x3fUL;
        upa_writeq(control, iommu->sbus_control_reg);

        /* Now some Xfire specific grot... */
        if (this_is_starfire)
                starfire_hookup(sbus->portid);

        sysio_register_error_handlers(sbus);
}

void sbus_fill_device_irq(struct sbus_dev *sdev)
{
        struct device_node *dp = of_find_node_by_phandle(sdev->prom_node);
        const struct linux_prom_irqs *irqs;

        irqs = of_get_property(dp, "interrupts", NULL);
        if (!irqs) {
                sdev->irqs[0] = 0;
                sdev->num_irqs = 0;
        } else {
                unsigned int pri = irqs[0].pri;

                sdev->num_irqs = 1;
                if (pri < 0x20)
                        pri += sdev->slot * 8;

                sdev->irqs[0] = sbus_build_irq(sdev->bus, pri);
        }
}

void __init sbus_arch_bus_ranges_init(struct device_node *pn, struct sbus_bus *sbus)
{
}

void __init sbus_setup_iommu(struct sbus_bus *sbus, struct device_node *dp)
{
        sbus_iommu_init(dp->node, sbus);
}

void __init sbus_setup_arch_props(struct sbus_bus *sbus, struct device_node *dp)
{
}

int __init sbus_arch_preinit(void)
{
        return 0;
}

void __init sbus_arch_postinit(void)
{
        extern void firetruck_init(void);

        firetruck_init();
}