/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;		/* Remapping table */

/* If this is disabled the IOMMU will use an optimized flushing strategy
   of only flushing when a mapping is reused. With it true the GART is
   flushed for every mapping. Problem is that doing the lazy flush seems
   to trigger bugs with some popular PCI cards, in particular 3ware (but
   it has also been seen with Qlogic at least). */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;

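/*
 * A GART PTE is a single 32bit word: bits 31..12 hold physical address
 * bits 31..12, bits 11..4 hold physical address bits 39..32, and the
 * low bits carry the valid/coherent flags. For example,
 * GPTE_ENCODE(0x1234567000UL) yields 0x34567123.
 */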
#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

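/*
 * Number of 4K pages a mapping of @size bytes starting at @addr spans,
 * counting partial pages: e.g. to_pages(0x1ffc, 0x10) == 2 because the
 * 16 bytes straddle a page boundary.
 */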
#define to_pages(addr,size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

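/*
 * Allocate a run of IOMMU pages with a next-fit search through the
 * allocation bitmap. Wrapping around to the start of the aperture may
 * reuse recently freed entries, so need_flush is set to make the next
 * flush_gart() invalidate stale GART/IOTLB state first.
 */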
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
				       iommu_pages, size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0,
					       iommu_pages, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = NULL;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;
void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);
	/* Very crude. dump some from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk("%lu: ", iommu_pages - i - 1);
		printk_address((unsigned long)iommu_leak_tab[iommu_pages - i - 1]);
		printk("%c", ((i + 2) % 4) == 0 ? '\n' : ' ');
	}
	printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	       "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
	       size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

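/*
 * A device needs remapping when the buffer does not fit below its DMA
 * mask: e.g. a 32bit-only card (*dev->dma_mask == 0xffffffff) asked to
 * DMA to memory above 4GB.
 */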
static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size >= mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;
	return mmu;
}

static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size >= mask;
	int mmu = high;

	return mmu;
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
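	/*
	 * phys_mem was advanced by whole pages in the loop above, so its
	 * low bits still hold the original in-page offset.
	 */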
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t gart_map_simple(struct device *dev, char *buf,
				  size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

	flush_gart();
	return map;
}

/* Map a single area into the IOMMU */
dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	BUG_ON(dir == DMA_NONE);

	if (!dev)
		dev = &fallback_dev;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);
	return bus;
}

/*
 * Free a DMA mapping.
 */
void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
		       size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long addr = page_to_phys(s->page) + s->offset;
		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();
	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
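/*
 * Example: two 4K chunks at physical 0x12340000 and 0x56780000 can end
 * up as one 8K dma_address range in the aperture, backed by two
 * consecutive GART PTEs (addresses illustrative only).
 */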
static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	int i;

	if (iommu_start == -1)
		return -1;

	for (i = start; i < stopat; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(i > start && s->offset);
		if (i == start) {
			*sout = *s;
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}

static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(stopat - start != 1);
		*sout = sg[start];
		sout->dma_length = sg[start].length;
		return 0;
	}
	return __dma_map_cont(sg, start, stopat, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;
	int out;
	int start;
	unsigned long pages = 0;
	int need = 0, nextneed;

	BUG_ON(dir == DMA_NONE);
	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		dma_addr_t addr = page_to_phys(s->page) + s->offset;
		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not-yet-processed entries */
		if (i > start) {
			struct scatterlist *ps = &sg[i-1];
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(sg, start, i, sg+out, pages,
						 need) < 0)
					goto error;
				out++;
				pages = 0;
				start = i;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
	}
	if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents)
		sg[out].dma_length = 0;
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, nents, dir);
	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for (i = 0; i < nents; i++)
		sg[i].dma_address = bad_dma_address;
	return 0;
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
		       iommu_size >> 20);

	return iommu_size;
}

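/*
 * Read the GART aperture from the northbridge. In the K8 register
 * layout assumed by this code, 0x90 is the aperture control register
 * (the order field sizes the aperture as 32MB << order) and 0x94 is
 * the aperture base register, stored in 32MB units (hence the shift
 * by 25).
 */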
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size >= 0xffffffff || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;
		u32 gatt_reg;

		dev = k8_northbridges[i];
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
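		/*
		 * 0x98 is the GART table base register: it takes bits
		 * 39..12 of the GATT's physical address in bits 31..4.
		 */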
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

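		/*
		 * Bit 0 enables GART translation. Bits 4 and 5 are, per
		 * the K8 docs, the DisGartCpu/DisGartIo table walk probe
		 * disables; clear them so the probes stay enabled.
		 */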
		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk("PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size >> 10);
	return 0;

nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}

extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
	.mapping_error = NULL,
	.map_single = gart_map_single,
	.map_simple = gart_map_simple,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
};

static int __init pci_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return -ENODEV;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return -ENODEV;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
					"but IOMMU not available.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
		}
		return -ENODEV;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * sizeof(void *));
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

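	/*
	 * The remapping area occupies the top iommu_size bytes of the
	 * aperture; agp_memory_reserved above keeps the AGP driver out
	 * of it.
	 */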
	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus
	 * abort then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

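/*
 * Parse the GART-specific part of the iommu= boot option, e.g.
 * iommu=fullflush or iommu=leak=16; the generic options are handled
 * by the iommu= parser in pci-dma.c, which calls this.
 */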
void gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}