/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("iommu=", setup_iommu);
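/*
 * Find a free range of npages entries in the table bitmap, honouring the
 * caller's DMA mask, the device's segment boundary and the requested
 * alignment.  Small allocations (15 pages or fewer) are kept below
 * it_halfpoint so that large allocations do not fragment the whole table.
 * Returns the table index of the first entry, or DMA_ERROR_CODE on failure.
 */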
static unsigned long iommu_range_alloc(struct device *dev,
                                       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;
        unsigned long boundary_size;
        unsigned long flags;

        align_mask = 0xffffffffffffffffl >> (64 - align_order);

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        spin_lock_irqsave(&(tbl->it_lock), flags);

        if (handle && *handle)
                start = *handle;
        else
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

        /* Use only half of the table for small allocs (15 pages or less) */
        limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

        if (largealloc && start < tbl->it_halfpoint)
                start = tbl->it_halfpoint;

        /* The case below can happen if we have a small segment appended
         * to a large one, or when the previous alloc was at the very end
         * of the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0.
                 */
                if ((start & mask) >= limit || pass > 0)
                        start = 0;
                else
                        start &= mask;
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << IOMMU_PAGE_SHIFT);
        else
                boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
        /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

        n = iommu_area_alloc(tbl->it_map, limit, start, npages,
                             tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
                             align_mask);
        if (n == -1) {
                if (likely(pass < 2)) {
                        /* First failure, just rescan the half of the table.
                         * Second failure, rescan the other half of the table.
                         */
                        start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
                        limit = pass ? tbl->it_size : limit;
                        pass++;
                        goto again;
                } else {
                        /* Third failure, give up */
                        spin_unlock_irqrestore(&(tbl->it_lock), flags);
                        return DMA_ERROR_CODE;
                }
        }

        end = n + npages;

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                tbl->it_largehint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                tbl->it_hint = (end + tbl->it_blocksize - 1) &
                                ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        return n;
}
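/*
 * Allocate a range of table entries for a single buffer and program the
 * corresponding TCEs through ppc_md.tce_build().  Returns the DMA address
 * of the first page, or DMA_ERROR_CODE if the allocation or the hardware
 * update fails.
 */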
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order,
                              struct dma_attrs *attrs)
{
        unsigned long entry;
        dma_addr_t ret = DMA_ERROR_CODE;
        int build_fail;

        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_ERROR_CODE))
                return DMA_ERROR_CODE;

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << IOMMU_PAGE_SHIFT;        /* Set the return dma address */

        /* Put the TCEs in the HW table */
        build_fail = ppc_md.tce_build(tbl, entry, npages,
                                      (unsigned long)page & IOMMU_PAGE_MASK,
                                      direction, attrs);

        /* ppc_md.tce_build() only returns non-zero for transient errors.
         * Clean up the table bitmap in this case and return
         * DMA_ERROR_CODE. For all other errors the functionality is
         * not altered.
         */
        if (unlikely(build_fail)) {
                __iommu_free(tbl, ret, npages);
                return DMA_ERROR_CODE;
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}
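/*
 * Validate a dma_addr/npages pair against the table bounds before freeing.
 * On a mismatch the offending values are logged (rate limited) and false is
 * returned so the caller leaves the bitmap and hardware table untouched.
 */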
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
                             unsigned int npages)
{
        unsigned long entry, free_entry;

        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }

                return false;
        }

        return true;
}
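/*
 * Clear the hardware TCEs and then the matching bits in the allocation
 * bitmap (under it_lock).  The IOMMU TLB is not flushed here; iommu_free()
 * below does that when the platform needs it.
 */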
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;
        unsigned long flags;

        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (!iommu_free_check(tbl, dma_addr, npages))
                return;

        ppc_md.tce_free(tbl, entry, npages);

        spin_lock_irqsave(&(tbl->it_lock), flags);
        bitmap_clear(tbl->it_map, free_entry, npages);
        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
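/* Free a single mapping and flush the IOMMU TLB if the platform needs it. */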
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                       unsigned int npages)
{
        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);
}
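/*
 * Map a scatterlist.  Segments whose DMA addresses turn out to be
 * contiguous are merged into one entry unless "iommu=novmerge" was given
 * or the device's maximum segment size would be exceeded.  Returns the
 * number of DMA segments produced, or 0 on failure with any partial
 * mappings backed out.
 */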
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 struct scatterlist *sglist, int nelems,
                 unsigned long mask, enum dma_data_direction direction,
                 struct dma_attrs *attrs)
{
        dma_addr_t dma_next = 0, dma_addr;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount, i, build_fail = 0;
        unsigned int align;
        unsigned long handle;
        unsigned int max_seg_size;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("sg mapping %d elements:\n", nelems);

        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long) sg_virt(s);
                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
                    (vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
                entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                          mask >> IOMMU_PAGE_SHIFT, align);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %lx npages %lu\n", tbl, vaddr,
                                         npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << IOMMU_PAGE_SHIFT;
                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);

                /* Insert into HW table */
                build_fail = ppc_md.tce_build(tbl, entry, npages,
                                              vaddr & IOMMU_PAGE_MASK,
                                              direction, attrs);
                if (unlikely(build_fail))
                        goto failure;

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if (novmerge || (dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %ux\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IOMMU_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IOMMU_PAGE_SIZE);
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        return 0;
}
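/*
 * Undo iommu_map_sg(): walk the list up to the first zero-length entry
 * (the terminator written by iommu_map_sg) and free the TCE range behind
 * each mapped segment.
 */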
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction,
                struct dma_attrs *attrs)
{
        struct scatterlist *sg;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        sg = sglist;
        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sg->dma_address;

                if (sg->dma_length == 0)
                        break;
                npages = iommu_num_pages(dma_handle, sg->dma_length,
                                         IOMMU_PAGE_SIZE);
                __iommu_free(tbl, dma_handle, npages);
                sg = sg_next(sg);
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);
}
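/*
 * Prepare the table contents at init time.  On a normal boot (or when
 * firmware assisted dump is active) any leftover firmware mappings are
 * simply cleared.  In a kdump kernel the first kernel's TCEs are kept and
 * marked busy in the bitmap so in-flight DMA is not corrupted; if that
 * leaves too little room, the top KDUMP_MIN_TCE_ENTRIES entries are freed
 * for the kdump boot.
 */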
static void iommu_table_clear(struct iommu_table *tbl)
{
        /*
         * In case of firmware assisted dump, the system goes through a
         * clean reboot process at the time of system crash. Hence it's
         * safe to clear the TCE entries if firmware assisted dump is
         * active.
         */
        if (!is_kdump_kernel() || is_fadump_active()) {
                /* Clear the table in case firmware left allocations in it */
                ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
                return;
        }

#ifdef CONFIG_CRASH_DUMP
        if (ppc_md.tce_get) {
                unsigned long index, tceval, tcecount = 0;

                /* Reserve the existing mappings left by the first kernel. */
                for (index = 0; index < tbl->it_size; index++) {
                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
                        /*
                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                                __set_bit(index, tbl->it_map);
                                tcecount++;
                        }
                }

                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
                        printk(KERN_WARNING "TCE table is full; freeing ");
                        printk(KERN_WARNING "%d entries for the kdump boot\n",
                                KDUMP_MIN_TCE_ENTRIES);
                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                                index < tbl->it_size; index++)
                                __clear_bit(index, tbl->it_map);
                }
        }
#endif
}
/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
        unsigned long sz;
        static int welcomed = 0;
        struct page *page;

        /* Set aside 1/4 of the table for large allocations. */
        tbl->it_halfpoint = tbl->it_size * 3 / 4;

        /* number of bytes needed for the bitmap */
        sz = (tbl->it_size + 7) >> 3;

        page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
        if (!page)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
        tbl->it_map = page_address(page);
        memset(tbl->it_map, 0, sz);

        /*
         * Reserve page 0 so it will not be used for any mappings.
         * This avoids buggy drivers that consider page 0 to be invalid
         * to crash the machine or even lose data.
         */
        if (tbl->it_offset == 0)
                set_bit(0, tbl->it_map);

        tbl->it_hint = 0;
        tbl->it_largehint = tbl->it_halfpoint;
        spin_lock_init(&tbl->it_lock);

        iommu_table_clear(tbl);

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}
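/*
 * Tear down a table built by iommu_init_table(): warn if any mappings are
 * still present, then release the allocation bitmap and the table itself.
 */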
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
        unsigned long bitmap_sz, i;
        unsigned int order;

        if (!tbl || !tbl->it_map) {
                printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
                                node_name);
                return;
        }

        /* verify that table contains no entries */
        /* it_size is in entries, and we're examining 64 at a time */
        for (i = 0; i < (tbl->it_size/64); i++) {
                if (tbl->it_map[i] != 0) {
                        printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
                                __func__, node_name);
                        break;
                }
        }

        /* calculate bitmap size in bytes */
        bitmap_sz = (tbl->it_size + 7) / 8;

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                          struct page *page, unsigned long offset, size_t size,
                          unsigned long mask, enum dma_data_direction direction,
                          struct dma_attrs *attrs)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        void *vaddr;
        unsigned long uaddr;
        unsigned int npages, align;

        BUG_ON(direction == DMA_NONE);

        vaddr = page_address(page) + offset;
        uaddr = (unsigned long)vaddr;
        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

        if (tbl) {
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                         mask >> IOMMU_PAGE_SHIFT, align,
                                         attrs);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (printk_ratelimit()) {
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %p npages %d\n", tbl, vaddr,
                                         npages);
                        }
                } else
                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
        }

        return dma_handle;
}
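/* Inverse of iommu_map_page(): free the TCE range backing the mapping. */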
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
                      size_t size, enum dma_data_direction direction,
                      struct dma_attrs *attrs)
{
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        if (tbl) {
                npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
                iommu_free(tbl, dma_handle, npages);
        }
}
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                           size_t size, dma_addr_t *dma_handle,
                           unsigned long mask, gfp_t flag, int node)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        /*
         * Client asked for way too much space.  This is checked later
         * anyway.  It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
                         size);
                return NULL;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        page = alloc_pages_node(node, flag, order);
        if (!page)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        nio_pages = size >> IOMMU_PAGE_SHIFT;
        io_order = get_iommu_order(size);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
        *dma_handle = mapping;
        return ret;
}
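/*
 * Inverse of iommu_alloc_coherent(): tear down the TCEs covering the
 * buffer and return its pages to the page allocator.
 */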
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        if (tbl) {
                unsigned int nio_pages;

                size = PAGE_ALIGN(size);
                nio_pages = size >> IOMMU_PAGE_SHIFT;
                iommu_free(tbl, dma_handle, nio_pages);
                size = PAGE_ALIGN(size);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}