/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

static int swiotlb __read_mostly;

static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

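/*
 * For illustration: the pool size can be overridden on the kernel command
 * line, with memparse() accepting the usual K/M/G suffixes, e.g.
 *
 *	coherent_pool=2M
 */
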
static void *__alloc_from_pool(size_t size, struct page **ret_page,
			       gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

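/*
 * Summary of __dma_alloc() above: atomic allocations for non-coherent
 * devices come from the pre-mapped atomic pool; everything else goes
 * through __dma_alloc_coherent() (CMA or swiotlb), with non-coherent
 * devices additionally getting a non-cacheable remapped alias of the
 * buffer.
 */
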
static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		/* tear down the non-cacheable alias created in __dma_alloc() */
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

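/*
 * Illustrative (not part of the original file): for a non-coherent device,
 * the callbacks above sit behind the usual streaming DMA API, e.g.
 *
 *	dma_addr_t buf = dma_map_single(dev, cpu_buf, len, DMA_FROM_DEVICE);
 *	... device writes into the buffer ...
 *	dma_unmap_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 * which lands in __swiotlb_map_page()/__swiotlb_unmap_page(), performing
 * the cache maintenance on the kernel alias either side of the transfer.
 */
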
static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
			    PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};

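/*
 * These ops are installed per device by arch_setup_dma_ops() below (via
 * dev->archdata.dma_ops), so a generic call such as
 *
 *	dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *
 * is dispatched through get_dma_ops(dev) to __dma_alloc() and friends.
 */
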
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_range(page_addr, page_addr + atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);
		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}

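/*
 * Note: gen_pool_first_fit_order_align makes each pool allocation aligned
 * to its own size rounded up to a power of two, so e.g. a 12K request
 * comes back 16K-aligned.
 */
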
/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

struct dma_map_ops dummy_dma_ops = {
	.alloc                  = __dummy_alloc,
	.free                   = __dummy_free,
	.mmap                   = __dummy_mmap,
	.map_page               = __dummy_map_page,
	.unmap_page             = __dummy_unmap_page,
	.map_sg                 = __dummy_map_sg,
	.unmap_sg               = __dummy_unmap_sg,
	.sync_single_for_cpu    = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu        = __dummy_sync_sg,
	.sync_sg_for_device     = __dummy_sync_sg,
	.mapping_error          = __dummy_mapping_error,
	.dma_supported          = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

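/*
 * dummy_dma_ops fail or no-op every operation (alloc returns NULL,
 * map_page returns DMA_ERROR_CODE), so stray DMA API calls by drivers of
 * non-DMA-capable devices fail cleanly instead of corrupting memory.
 */
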
static int __init arm64_dma_init(void)
{
	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_range(virt, virt + PAGE_SIZE);
}

static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (gfpflags_allow_blocking(gfp)) {
		struct page **pages;
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	} else {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 3 things depending on how it was allocated:
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_direction_to_prot(dir, coherent);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_direction_to_prot(dir, coherent));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.dma_supported = iommu_dma_supported,
	.mapping_error = iommu_dma_mapping_error,
};

/*
 * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 * everything it needs to - the device is only partially created and the
 * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 * to move the arch_setup_dma_ops() call later, all the notifier bits below
 * become unnecessary, and will go away.
 */
struct iommu_dma_notifier_data {
	struct list_head list;
	struct device *dev;
	const struct iommu_ops *ops;
	u64 dma_base;
	u64 size;
};
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);

static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			    u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	/*
	 * If the IOMMU driver has the DMA domain support that we require,
	 * then the IOMMU core will have already configured a group for this
	 * device, and allocated the default domain for that group.
	 */
	if (!domain || iommu_dma_init_domain(domain, dma_base, size)) {
		pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
			dev_name(dev));
		return false;
	}

	dev->archdata.dma_ops = &iommu_dma_ops;
	return true;
}

static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			       u64 dma_base, u64 size)
{
	struct iommu_dma_notifier_data *iommudata;

	iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
	if (!iommudata)
		return;

	iommudata->dev = dev;
	iommudata->ops = ops;
	iommudata->dma_base = dma_base;
	iommudata->size = size;

	mutex_lock(&iommu_dma_notifier_lock);
	list_add(&iommudata->list, &iommu_dma_masters);
	mutex_unlock(&iommu_dma_notifier_lock);
}

static int __iommu_attach_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct iommu_dma_notifier_data *master, *tmp;

	if (action != BUS_NOTIFY_BIND_DRIVER)
		return 0;

	mutex_lock(&iommu_dma_notifier_lock);
	list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
		if (data == master->dev && do_iommu_attach(master->dev,
				master->ops, master->dma_base, master->size)) {
			list_del(&master->list);
			kfree(master);
			break;
		}
	}
	mutex_unlock(&iommu_dma_notifier_lock);
	return 0;
}

static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
	struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
	int ret;

	if (!nb)
		return -ENOMEM;

	nb->notifier_call = __iommu_attach_notifier;

	ret = bus_register_notifier(bus, nb);
	if (ret) {
		pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
			bus->name);
		kfree(nb);
	}

	return ret;
}

static int __init __iommu_dma_init(void)
{
	int ret;

	ret = iommu_dma_init();
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&platform_bus_type);
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&amba_bustype);
#ifdef CONFIG_PCI
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
#endif
	return ret;
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_group *group;

	if (!ops)
		return;
	/*
	 * TODO: As a concession to the future, we're ready to handle being
	 * called both early and late (i.e. after bus_add_device). Once all
	 * the platform bus code is reworked to call us late and the notifier
	 * junk above goes away, move the body of do_iommu_attach here.
	 */
	group = iommu_group_get(dev);
	if (group) {
		do_iommu_attach(dev, ops, dma_base, size);
		iommu_group_put(group);
	} else {
		queue_iommu_attach(dev, ops, dma_base, size);
	}
}

void arch_teardown_dma_ops(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (WARN_ON(domain))
		iommu_detach_device(domain, dev);

	dev->archdata.dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->archdata.dma_ops)
		dev->archdata.dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}

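/*
 * Illustrative: the bus/firmware code (e.g. OF or ACPI) calls
 * arch_setup_dma_ops() while configuring a device, typically deriving
 * dma_base/size from "dma-ranges" and @coherent from a "dma-coherent"
 * property; the device then either keeps the SWIOTLB ops installed above
 * or is switched over to iommu_dma_ops by __iommu_setup_dma_ops().
 */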