/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
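/*
 * Example (illustrative sketch only, not part of this file): a driver would
 * not call arm_dma_map_page() directly but go through the generic DMA API,
 * which dispatches to these ops.  The "foo" device and length below are
 * hypothetical.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_page(foo->dev, page, offset, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(foo->dev, addr))
 *		return -ENOMEM;
 *	... program addr into the device and run the transfer ...
 *	dma_unmap_page(foo->dev, addr, len, DMA_TO_DEVICE);
 */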
/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	if (!arch_is_coherent())
		__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	if (!arch_is_coherent())
		__dma_page_cpu_to_dev(page, offset, size, dir);
}

static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)arm_dma_limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & (u64)arm_dma_limit) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (u64)arm_dma_limit);
			return 0;
		}
	}

	return mask;
}
static void __dma_clear_buffer(struct page *page, size_t size)
{
	void *ptr;
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	if (ptr) {
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}
#ifdef CONFIG_MMU

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}
struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
	struct page *page;
};

static struct dma_pool atomic_pool = {
	.size = SZ_256K,
};

static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	struct dma_pool *pool = &atomic_pool;
	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;

	if (IS_ENABLED(CONFIG_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
					   &page, NULL);
	if (ptr) {
		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
		pool->page = page;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
			(unsigned)pool->size / 1024);
		return 0;
	}
	kfree(bitmap);
no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)pool->size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		iotable_init(&map, 1);
	}
}
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size. This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
					    0, count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->page + pageno;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}

static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (start < pool->vaddr || start > pool->vaddr + pool->size)
		return 0;

	if (start + size > pool->vaddr + pool->size) {
		WARN(1, "freeing wrong coherent size from pool\n");
		return 0;
	}

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);
	__dma_remap(page, size, prot);

	*ret_page = page;
	return page_address(page);
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   size_t size)
{
	__dma_remap(page, size, pgprot_kernel);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}

#define nommu()	0

#else	/* !CONFIG_MMU */

#define nommu()	1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret)		NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, size)			do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (arch_is_coherent() || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (gfp & GFP_ATOMIC)
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot,
			   __builtin_return_address(0));
}
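/*
 * Example (illustrative sketch only): drivers reach arm_dma_alloc() via
 * dma_alloc_coherent()/dma_free_coherent().  The "foo" names are
 * hypothetical.
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(foo->dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... program dma_handle into the device, use cpu_addr from the CPU ...
 *	dma_free_coherent(foo->dev, SZ_4K, cpu_addr, dma_handle);
 */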
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
/*
 * Free a buffer as defined by the above mapping.
 */
void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (arch_is_coherent() || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!IS_ENABLED(CONFIG_CMA)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, size);
	}
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}
/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	if (mask < (u64)arm_dma_limit)
		return 0;
	return 1;
}
EXPORT_SYMBOL(dma_supported);

static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	unsigned long flags;

	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
		 (1 << mapping->order) - 1) >> mapping->order;

	if (order > mapping->order)
		align = (1 << (order - mapping->order)) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
					   count, align);
	if (start > mapping->bits) {
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);

	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start = (addr - mapping->base) >>
			     (mapping->order + PAGE_SHIFT);
	unsigned int count = ((size >> PAGE_SHIFT) +
			      (1 << mapping->order) - 1) >> mapping->order;
	unsigned long flags;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
{
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i;
	for (i = 0; i < count; i++)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return 0;
}
/*
 * Create a CPU mapping for the specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long p;

	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;

	area->pages = pages;
	area->nr_pages = nr_pages;
	p = (unsigned long)area->addr;

	for (i = 0; i < nr_pages; i++) {
		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
			goto err;
		p += PAGE_SIZE;
	}
	return area->addr;
err:
	unmap_kernel_range((unsigned long)area->addr, size);
	vunmap(area->addr);
	return NULL;
}

/*
 * Create a mapping in device IO address space for the specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	pages = __iommu_alloc_buffer(dev, size, gfp);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size);
	return NULL;
}
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	size = PAGE_ALIGN(size);

	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		unmap_kernel_range((unsigned long)cpu_addr, size);
		vunmap(cpu_addr);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!arch_is_coherent() &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}
/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}
/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!arch_is_coherent() &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		if (!arch_is_coherent())
			__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		if (!arch_is_coherent())
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, len = PAGE_ALIGN(size + offset);

	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	if (!arch_is_coherent())
		__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
};
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: size of the valid IO address space
 * @order: accuracy of the IO addresses allocations
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
			 int order)
{
	unsigned int count = size >> (PAGE_SHIFT + order);
	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int err = -ENOMEM;

	if (!count)
		return ERR_PTR(-EINVAL);

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmap)
		goto err2;

	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;
	mapping->order = order;
	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err3;

	kref_init(&mapping->kref);
	return mapping;
err3:
	kfree(mapping->bitmap);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}

static void release_iommu_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	kfree(mapping->bitmap);
	kfree(mapping);
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. More than one client might be attached to
 * the same io address space mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;
	set_dma_ops(dev, &iommu_ops);

	pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}