/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. When it is true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with QLogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

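/*
 * Worked example (illustration only, not from the original source): a GART
 * PTE packs a 40-bit physical page address into 32 bits.  For
 * x = 0x100002000 (a page just above 4GB):
 *
 *   GPTE_ENCODE(x) = (x & 0xfffff000)   -> 0x2000  (bits 12-31 of x)
 *                  | ((x >> 32) << 4)   -> 0x0010  (bits 32-39 of x)
 *                  | GPTE_VALID | GPTE_COHERENT    (flag bits 0-1)
 *                  = 0x2013
 *
 * GPTE_DECODE(0x2013) restores 0x2000 | (0x010 << 28) = 0x100002000.
 *
 * to_pages() counts the pages a buffer spans including its in-page
 * offset, e.g. to_pages(0xff0, 0x20) = 2 because those 0x20 bytes
 * cross a page boundary.
 */
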
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state, set on each GART wrap */

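/*
 * Allocate 'size' contiguous GART pages from the bitmap.  Allocation is
 * next-fit starting at next_bit; when the search wraps to the start of
 * the aperture a GART flush is scheduled so that stale entries from the
 * previous pass cannot be hit by the hardware before they are flushed.
 */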
static unsigned long alloc_iommu(struct device *dev, int size)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, 0);
	if (offset == -1) {
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size, 0);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-1-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-1-i],
			       0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif
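
/*
 * With CONFIG_IOMMU_LEAK, SET_LEAK() records the caller's return address
 * for every GART page as it is mapped and CLEAR_LEAK() erases it again on
 * unmap, so dump_leak() can point at the driver that leaked mappings once
 * the aperture fills up.
 */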

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
		"PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
		size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}
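
/*
 * For example, a device with a 32-bit dma_mask (0xffffffff) mapping a
 * buffer at physical 0x100002000 sees addr + size > mask, so the buffer
 * must be remapped through the GART; the same buffer below 4GB would be
 * passed through untranslated unless force_iommu is set.
 */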

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(dev, npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
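	/*
	 * The loop above advanced phys_mem past the end of the buffer, but
	 * only in whole pages, so its low bits still hold the original
	 * in-page offset that gets folded into the returned bus address.
	 */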
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t
gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, paddr, size, dir);

	flush_gart();

	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!dev)
		dev = &fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = gart_map_simple(dev, paddr, size, dir);

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}
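
/*
 * Illustration: two back-to-back page-sized entries whose backing pages
 * are physically discontiguous can be emitted as one merged DMA segment,
 * because the GART pages allocated above are contiguous in bus-address
 * space.  Only the first entry (sout) carries the resulting
 * dma_address/dma_length pair; the callers below decide when merging
 * is legal.
 */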

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not-yet-processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += to_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}
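
/*
 * For example, with a 128MB aperture and AGP still in use, half the
 * aperture (64MB) is claimed for the IOMMU, then trimmed so that
 * aper + iommu_size falls on a 2MB (PMD_PAGE_SIZE) boundary.
 */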

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
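
/*
 * The aperture control register encodes the size as an order in bits 1-3:
 * size = 32MB << order, so order 0 is 32MB and order 3 is 256MB.  The base
 * register holds physical address bits 39-25, hence the 15-bit mask and
 * the shift by 25 above.
 */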

static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet. That is the next
			 * step. Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
					       aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
					       aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name		= "gart",
	.suspend	= gart_suspend,
	.resume		= gart_resume,
};

static struct sys_device device_gart = {
	.id	= 0,
	.cls	= &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;
	unsigned long start_pfn, end_pfn;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- "
		      "would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	/* need to map that range */
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}

extern int agp_amd64_init(void);

static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error			= NULL,
	.map_single			= gart_map_single,
	.map_simple			= gart_map_simple,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
};
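
/*
 * All sync hooks are NULL: the GART PTEs are created with GPTE_COHERENT,
 * so (presumably by design) DMA through the aperture snoops the CPU
 * caches and no explicit sync step is needed.
 */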

void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0,
			       iommu_pages * sizeof(void *));
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't cause a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}