/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
/* Debug trace helper; a no-op unless debug printing is wired up. */
#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* This is a sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
			       ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
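
/*
 * Illustrative sketch (not part of the original file): how callers in this
 * file derive iommu_range_alloc() arguments for a kernel-virtual buffer.
 * The "example_" name is hypothetical; the logic mirrors iommu_alloc() and
 * iommu_map_page() below.  The caller is expected to hold tbl->it_lock,
 * exactly as iommu_alloc() does.
 */
static unsigned long example_range_alloc(struct device *dev,
					 struct iommu_table *tbl,
					 void *buf, size_t size,
					 unsigned long dma_mask)
{
	unsigned long uaddr = (unsigned long)buf;
	unsigned int npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
	unsigned int align = 0;

	/* Ask for natural alignment only when the buffer itself is
	 * kernel-page aligned and at least one kernel page long. */
	if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
	    (uaddr & ~PAGE_MASK) == 0)
		align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

	/* dma_mask is in bytes; the allocator works in IOMMU pages.
	 * Returns the table entry index, or DMA_ERROR_CODE on failure. */
	return iommu_range_alloc(dev, tbl, npages, NULL,
				 dma_mask >> IOMMU_PAGE_SHIFT, align);
}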
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page & IOMMU_PAGE_MASK,
				      direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);

		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	bitmap_clear(tbl->it_map, free_entry, npages);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK,
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 * - the merged segment would exceed the device's segment limit
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
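
/*
 * Usage sketch (illustrative, not part of the original file): a caller of
 * iommu_map_sg() walks only the first 'mapped' output segments; when fewer
 * segments come back than went in, the entry after the last one is
 * terminated with dma_length == 0 as done above.  The "example_" name is
 * hypothetical.
 */
static void example_map_and_walk_sg(struct device *dev, struct iommu_table *tbl,
				    struct scatterlist *sgl, int nents,
				    unsigned long dma_mask)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = iommu_map_sg(dev, tbl, sgl, nents, dma_mask,
			      DMA_TO_DEVICE, NULL);
	if (mapped == 0)
		return;		/* nothing was mapped */

	for_each_sg(sgl, sg, mapped, i) {
		/* Each entry now describes one (possibly merged) DMA segment */
		pr_debug("seg %d: addr 0x%llx len 0x%x\n", i,
			 (unsigned long long)sg_dma_address(sg),
			 sg_dma_len(sg));
	}

	iommu_unmap_sg(tbl, sgl, mapped, DMA_TO_DEVICE, NULL);
}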
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		    int nelems, enum dma_data_direction direction,
		    struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * With firmware-assisted dump, the system goes through a clean
	 * reboot at the time of a crash, so it is safe to clear the TCE
	 * entries when firmware-assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}
/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This keeps buggy drivers that treat page 0 as invalid from
	 * crashing the machine or even losing data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
		       node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
			       __func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}
/* Creates TCEs for a user-provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page.  The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}
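
/*
 * Wrapper sketch (illustrative): roughly how the powerpc dma_map_ops glue
 * drives iommu_map_page() for a single kernel buffer.  It assumes the
 * platform has attached a table to the device (get_iommu_table_base()) and
 * that the buffer is not vmalloc memory; the "example_" name is hypothetical
 * and dma_get_mask() stands in for the platform's own mask helper.
 */
static dma_addr_t example_map_single(struct device *dev, void *ptr,
				     size_t size, enum dma_data_direction dir)
{
	return iommu_map_page(dev, get_iommu_table_base(dev),
			      virt_to_page(ptr), offset_in_page(ptr), size,
			      dma_get_mask(dev), dir, NULL);
}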
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
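
/*
 * Usage sketch (illustrative, not part of the original file): a round trip
 * through the coherent-allocation helpers above.  The table pointer would
 * normally come from the device's platform/archdata setup; the "example_"
 * name is hypothetical.
 */
static void example_coherent_roundtrip(struct device *dev,
				       struct iommu_table *tbl)
{
	dma_addr_t dma;
	void *buf;

	buf = iommu_alloc_coherent(dev, tbl, PAGE_SIZE, &dma,
				   dev->coherent_dma_mask, GFP_KERNEL,
				   dev_to_node(dev));
	if (!buf)
		return;

	/* ... hand 'dma' to the device, use 'buf' from the CPU ... */

	iommu_free_coherent(tbl, PAGE_SIZE, buf, dma);
}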