/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

unsigned long iommu_bus_base;		/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;			/* Remapping table */

/* If this is disabled the IOMMU will use an optimized flushing strategy
   of only flushing when a mapping is reused. With it true the GART is
   flushed for every mapping. The problem is that the lazy flush seems
   to trigger bugs with some popular PCI cards, in particular 3ware (but
   it has also been seen with Qlogic at least). */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

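/*
 * Worked example: GPTE_ENCODE() packs a 40bit physical address into a
 * 32bit GART PTE: address bits 12-31 stay in place and bits 32-39 move
 * into PTE bits 4-11. For phys 0x123456000:
 *
 *	GPTE_ENCODE(0x123456000)
 *	  = 0x23456000 | (0x1 << 4) | GPTE_VALID | GPTE_COHERENT
 *	  = 0x23456013
 *	GPTE_DECODE(0x23456013)
 *	  = 0x23456000 | (0x010 << 28)
 *	  = 0x123456000		(round trip)
 *
 * to_pages() counts the pages an unaligned buffer spans, e.g.
 * to_pages(0x1001, 0x2000) covers bytes 0x1001..0x3000 and yields 3.
 */
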
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
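
/*
 * The allocator below is a simple next-fit scan over the aperture
 * bitmap: it searches from where the previous allocation ended and
 * wraps to the start only when the tail is full. A wrap means old
 * aperture slots are about to be reused, so need_flush is set and the
 * GART TLB is flushed before a device can see stale translations.
 */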

static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
				       iommu_pages, size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0,
					       iommu_pages, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do { if (iommu_leak_tab)					\
		iommu_leak_tab[x] = __builtin_return_address(0); } while (0)
#define CLEAR_LEAK(x)							\
	do { if (iommu_leak_tab)					\
		iommu_leak_tab[x] = NULL; } while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;

void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);
	/* Very crude: dump some entries from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk("%lu: ", iommu_pages - i - 1);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages - i - 1]);
		printk("%c", (i + 2) % 4 == 0 ? '\n' : ' ');
	}
	printk("\n");
}
#else
#define SET_LEAK(x)	do { } while (0)
#define CLEAR_LEAK(x)	do { } while (0)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	       "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
	       size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;
	return mmu;
}

static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
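	/*
	 * phys_mem was advanced by whole pages in the loop above, which
	 * leaves its low bits untouched, so (phys_mem & ~PAGE_MASK) is
	 * still the caller's original offset within the first page.
	 */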
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t gart_map_simple(struct device *dev, char *buf,
				  size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

	flush_gart();
	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	if (!dev)
		dev = &fallback_dev;

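	/*
	 * Buffers already addressable through the device's DMA mask are
	 * returned untranslated (unless force_iommu is set), so no
	 * aperture slot is consumed for them.
	 */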
	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);
	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
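	/*
	 * Everything past this point is a real aperture mapping: direct
	 * physical mappings lie outside the remap window, and
	 * bad_dma_address points into the reserved EMERGENCY_PAGES at
	 * its start, so both take the early return above.
	 */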
	iommu_page = (dma_addr - iommu_bus_base) >> PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = page_to_phys(s->page) + s->offset;
		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();
	return nents;
}

/* Map multiple scatterlist entries contiguous into the first. */
static int __dma_map_cont(struct scatterlist *start, int nelems,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long npages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			*sout = *s;
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		npages = to_pages(s->offset, s->length);
		while (npages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}

static inline int dma_map_cont(struct scatterlist *start, int nelems,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		*sout = *start;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
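/*
 * Sketch of the merge loop below: entries accumulate into a run while
 * each previous entry ends exactly on a page boundary and the next one
 * starts at offset 0; when the run breaks (or iommu_merge is off), the
 * accumulated run is emitted as one output entry via dma_map_cont()
 * and a new run starts at the current entry.
 */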
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;
	int out;
	int start;
	unsigned long pages = 0;
	int need = 0, nextneed;
	struct scatterlist *s, *ps, *start_sg, *sgmap;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = page_to_phys(s->page) + s->offset;
		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(start_sg, i - start, sgmap,
						 pages, need) < 0)
					goto error;
				out++;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, nents, dir);
	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
		       iommu_size >> 20);

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
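
/*
 * Decoding above: config reg 0x90 holds the aperture order (bits 3:1,
 * size = 32MB << order) and reg 0x94 the aperture base, whose low 15
 * bits are physical address bits 39:25 (hence the << 25). Apertures
 * that end above 4GB are rejected, since the GART exists to serve
 * 32bit-only devices.
 */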

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT,
				  PAGE_KERNEL_NOCACHE))
		panic("Could not set GART PTEs to uncacheable pages");
	global_flush_tlb();

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;
		u32 gatt_reg;

		dev = k8_northbridges[i];
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
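		/*
		 * The GATT base register at 0x98 takes physical address
		 * bits 39:12 of the table in register bits 31:4, hence
		 * the shift down by 12 and back up by 4.
		 */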
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size >> 10);
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}

extern int agp_amd64_init(void);

static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error = NULL,
	.map_single = gart_map_single,
	.map_simple = gart_map_simple,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
};
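
/*
 * All sync hooks are NULL: GART PTEs are installed with GPTE_COHERENT
 * and x86 DMA is cache coherent, so there is nothing to sync and the
 * generic code can skip those calls entirely.
 */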

void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, 0x90, &ctl);

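		/*
		 * Bit 0 of reg 0x90 is the GART enable bit (the same
		 * register init_k8_gatt() programs), so this turns
		 * translation off on each northbridge at shutdown.
		 */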
		ctl &= ~1;

		pci_write_config_dword(dev, 0x90, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		 (agp_amd64_init() < 0) ||
		 (agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * sizeof(void *));
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start >> PAGE_SHIFT);
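	/*
	 * The IOMMU takes the top iommu_size bytes of the aperture; AGP
	 * keeps the rest, which is why the same amount is reported back
	 * via agp_memory_reserved above. iommu_gatt_base points at the
	 * GATT entries covering just that tail region.
	 */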

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}
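
/*
 * The options below arrive via the iommu= kernel command line, e.g.
 * "iommu=fullflush" or "iommu=memaper=2"; the iommu= parser in
 * pci-dma.c hands its argument string to gart_parse_options().
 */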
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}