/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
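
/*
 * Illustrative sketch (not part of this file): how a driver sees the
 * ownership transitions implemented by the helpers above.  The names
 * "dev", "buf" and "len" are hypothetical driver-side variables.
 *
 *	struct page *pg = virt_to_page(buf);
 *	dma_addr_t dma = dma_map_page(dev, pg, offset_in_page(buf), len,
 *				      DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...			device owns the buffer; start the transfer
 *	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 *	...			CPU owns the buffer again
 *
 * For DMA_TO_DEVICE the map step cleans the caches; for DMA_FROM_DEVICE
 * the unmap step performs the delayed invalidation described above.
 */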
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}
static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}
struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		/*
		 * If the mask allows for more memory than we can address,
		 * and we actually have that much memory, then fail the
		 * allocation.
		 */
		if (sizeof(mask) != sizeof(dma_addr_t) &&
		    mask > (dma_addr_t)~0 &&
		    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
			return 0;
		}

		/*
		 * Now check that the mask, when translated to a PFN,
		 * fits within the allowable addresses which we can
		 * allocate.
		 */
		if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 arm_dma_pfn_limit + 1);
			return 0;
		}
	}

	return mask;
}
static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}
#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#warning ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller);
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}
static void __dma_free_remap(void *cpu_addr, size_t size)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}
#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K

struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
	struct page **pages;
};

static struct dma_pool atomic_pool = {
	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};

static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool.vaddr);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool.size = size;
}
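
/*
 * Illustrative sketch (hypothetical platform code, not part of this file):
 * a machine's early init hook may enlarge the atomic pool before
 * atomic_pool_init() runs, for example when many drivers allocate coherent
 * memory from atomic context:
 *
 *	static void __init foo_machine_init_early(void)
 *	{
 *		init_dma_coherent_pool_size(SZ_1M);
 *	}
 *
 * The same effect is available at boot time through the "coherent_pool="
 * kernel command line parameter handled by early_coherent_pool() above,
 * e.g. "coherent_pool=1M".
 */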
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	struct dma_pool *pool = &atomic_pool;
	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
	struct page **pages;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;

	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto no_pages;

	if (IS_ENABLED(CONFIG_DMA_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
					      atomic_pool_init);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
					   atomic_pool_init);
	if (ptr) {
		int i;

		for (i = 0; i < nr_pages; i++)
			pages[i] = page + i;

		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
		pool->pages = pages;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
			(unsigned)pool->size / 1024);
		return 0;
	}

	kfree(pages);
no_pages:
	kfree(bitmap);
no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)pool->size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		iotable_init(&map, 1);
	}
}
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size. This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
					    0, count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->pages[pageno];
	} else {
		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
			    "Please increase it with coherent_pool= kernel parameter!\n",
			    (unsigned)pool->size / 1024);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}
static bool __in_atomic_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	void *end = start + size;
	void *pool_start = pool->vaddr;
	void *pool_end = pool->vaddr + pool->size;

	if (start < pool_start || start >= pool_end)
		return false;

	if (end <= pool_end)
		return true;

	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
	     start, end - 1, pool_start, pool_end - 1);

	return false;
}
static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (!__in_atomic_pool(start, size))
		return 0;

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size)
{
	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size);
	else
		__dma_remap(page, size, pgprot_kernel);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_DMA_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0));
}
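
/*
 * Illustrative sketch (not part of this file): typical driver-side use of
 * the coherent allocator above, via the generic DMA API.  "dev" and "ring"
 * are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...			ring is CPU-visible, ring_dma is the bus address
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */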
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0));
}
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size);
	}
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}
/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}
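
/*
 * Illustrative sketch (not part of this file): scatter-gather mapping from a
 * driver's point of view.  "dev", "sglist" and "nents" are hypothetical.
 *
 *	int mapped = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, mapped, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...			after the transfer completes
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, not the value returned
 * by dma_map_sg(); program_hw_descriptor() stands in for device-specific code.
 */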
/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}
/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	unsigned long limit;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
		return 0;

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	limit = dma_to_pfn(dev, mask);

	if (limit < arm_dma_pfn_limit)
		return 0;

	return 1;
}
EXPORT_SYMBOL(dma_supported);

int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
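
/*
 * Illustrative sketch (not part of this file): a driver probe routine would
 * normally negotiate its masks before mapping anything, which ends up in
 * dma_supported()/arm_dma_set_mask() above.  "pdev" is hypothetical.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
 *	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 */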
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	unsigned long flags;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
		 (1 << mapping->order) - 1) >> mapping->order;

	if (order > mapping->order)
		align = (1 << (order - mapping->order)) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
					   count, align);
	if (start > mapping->bits) {
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);

	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
}
static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start = (addr - mapping->base) >>
			     (mapping->order + PAGE_SHIFT);
	unsigned int count = ((size >> PAGE_SHIFT) +
			      (1 << mapping->order) - 1) >> mapping->order;
	unsigned long flags;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
	{
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/*
	 * IOMMU can map any pages, so himem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (j--)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}
static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, struct dma_attrs *attrs)
{
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return 0;
}
/*
 * Create a CPU mapping for a specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long p;

	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;

	area->pages = pages;
	area->nr_pages = nr_pages;
	p = (unsigned long)area->addr;

	for (i = 0; i < nr_pages; i++) {
		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
			goto err;
		p += PAGE_SIZE;
	}
	return area->addr;
err:
	unmap_kernel_range((unsigned long)area->addr, size);
	vunmap(area->addr);
	return NULL;
}
/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				IOMMU_READ|IOMMU_WRITE);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}
static struct page **__atomic_get_pages(void *addr)
{
	struct dma_pool *pool = &atomic_pool;
	struct page **pages = pool->pages;
	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;

	return pages + offs;
}

static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}
static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size)
{
	__iommu_remove_mapping(dev, handle, size);
	__free_from_pool(cpu_addr, size);
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (gfp & GFP_ATOMIC)
		return __iommu_alloc_atomic(dev, size, handle);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (__in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		unmap_kernel_range((unsigned long)cpu_addr, size);
		vunmap(cpu_addr);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}
static int __dma_direction_to_prot(enum dma_data_direction dir)
{
	int prot;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		prot = IOMMU_READ | IOMMU_WRITE;
		break;
	case DMA_TO_DEVICE:
		prot = IOMMU_READ;
		break;
	case DMA_FROM_DEVICE:
		prot = IOMMU_WRITE;
		break;
	default:
		prot = 0;
	}

	return prot;
}
/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_direction_to_prot(dir);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}
/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
		bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}
/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	prot = __dma_direction_to_prot(dir);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}
/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}
/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}
struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.set_dma_mask		= arm_dma_set_mask,
};

struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.set_dma_mask	= arm_dma_set_mask,
};
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: size of the valid IO address space
 * @order: accuracy of the IO addresses allocations
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device() function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
			 int order)
{
	unsigned int count = size >> (PAGE_SHIFT + order);
	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int err = -ENOMEM;

	if (!count)
		return ERR_PTR(-EINVAL);

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmap)
		goto err2;

	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;
	mapping->order = order;
	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err3;

	kref_init(&mapping->kref);
	return mapping;
err3:
	kfree(mapping->bitmap);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
static void release_iommu_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	kfree(mapping->bitmap);
	kfree(mapping);
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device;
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. More than one client might be attached to
 * the same io address space mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;
	set_dma_ops(dev, &iommu_ops);

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
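
/*
 * Illustrative sketch (not part of this file): how a bus or driver might
 * wire a device to an IOMMU-backed DMA address space using the two calls
 * above.  The base/size values are hypothetical.
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M, 0);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 *
 * After a successful attach, dev uses iommu_ops, so dma_alloc_coherent(),
 * dma_map_sg() etc. transparently go through the IOMMU.  Detach with
 * arm_iommu_detach_device() and drop the reference with
 * arm_iommu_release_mapping() when done.
 */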
/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This voids the dma operations (dma_map_ops pointer)
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	dev->archdata.mapping = NULL;
	set_dma_ops(dev, NULL);

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

#endif /* CONFIG_ARM_DMA_USE_IOMMU */