/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad;

	if (domain->iova_cookie)
		return -EEXIST;

	iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
	domain->iova_cookie = iovad;

	return iovad ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

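/*
 * Illustrative sketch (not part of this file): an IOMMU driver's
 * domain_alloc callback might pair these helpers as below. The names
 * "my_domain" and "my_domain_alloc" are hypothetical.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *md = kzalloc(sizeof(*md), GFP_KERNEL);
 *
 *		if (!md)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&md->domain)) {
 *			kfree(md);
 *			return NULL;
 *		}
 *		return &md->domain;
 *	}
 *
 * The matching domain_free callback would call iommu_put_dma_cookie()
 * before freeing its own structure.
 */
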
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad = domain->iova_cookie;

	if (!iovad)
		return;

	if (iovad->granule)
		put_iova_domain(iovad);
	kfree(iovad);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);

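/*
 * Illustrative arch-side sketch (hypothetical caller, not part of this
 * file): once a device has been attached to a DMA domain, the arch code
 * hands the usable IOVA window to this layer, e.g.
 *
 *	if (iommu_dma_init_domain(domain, dma_base, size))
 *		pr_warn("Failed to initialise IOVA domain for %s\n",
 *			dev_name(dev));
 *
 * where dma_base and size would normally be multiples of the IOMMU page
 * granularity, per the comment above.
 */
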
/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

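/*
 * For example, a non-coherent DMA_FROM_DEVICE transfer (device writes,
 * CPU reads) maps to just IOMMU_WRITE, while a coherent DMA_BIDIRECTIONAL
 * transfer maps to IOMMU_CACHE | IOMMU_READ | IOMMU_WRITE.
 */
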
static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
		dma_addr_t dma_limit)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long shift = iova_shift(iovad);
	unsigned long length = iova_align(iovad, size) >> shift;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);
	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	return alloc_iova(iovad, length, dma_limit >> shift, true);
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn = dma_addr >> shift;
	struct iova *iova = find_iova(iovad, pfn);
	size_t size;

	if (WARN_ON(!iova))
		return;

	size = iova_size(iova) << shift;
	size -= iommu_unmap(domain, pfn << shift, size);
	/* ...and if we can't, then something is horribly, horribly wrong */
	WARN_ON(size > 0);
	__free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t dma_addr;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_ERROR_CODE;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
	if (!iova)
		goto out_free_pages;

	size = iova_align(iovad, size);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = dma_addr;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	__free_iova(iovad, iova);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

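/*
 * Hedged usage sketch (not part of this file): an arch allocator would
 * typically obtain the page array here and then give it a CPU mapping,
 * roughly along these lines (the prot/ioprot values and remapping helper
 * are the caller's responsibility):
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs, ioprot, handle,
 *				flush_page);
 *	if (!pages)
 *		return NULL;
 *	addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
 *				      __builtin_return_address(0));
 *	if (!addr)
 *		iommu_dma_free(dev, pages, size, handle);
 */
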
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

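/*
 * Sketch of a typical caller (illustrative, assuming the buffer was
 * remapped with an array-backed vmalloc-space mapping): the arch mmap
 * hook recovers the page array from the CPU address and hands it here.
 *
 *	struct vm_struct *area = find_vm_area(cpu_addr);
 *
 *	if (area && area->pages)
 *		return iommu_dma_mmap(area->pages, size, vma);
 */
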
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	dma_addr_t dma_addr;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	phys_addr_t phys = page_to_phys(page) + offset;
	size_t iova_off = iova_offset(iovad, phys);
	size_t len = iova_align(iovad, size + iova_off);
	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));

	if (!iova)
		return DMA_ERROR_CODE;

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
		__free_iova(iovad, iova);
		return DMA_ERROR_CODE;
	}
	return dma_addr + iova_off;
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

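/*
 * Hedged sketch (not part of this file) of how an arch dma_map_ops
 * implementation might wire these up together with
 * dma_direction_to_prot(); "dev_is_dma_coherent" stands in for whatever
 * coherency query the arch actually provides:
 *
 *	int prot = dma_direction_to_prot(dir, dev_is_dma_coherent(dev));
 *	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset,
 *						 size, prot);
 *
 * with iommu_dma_unmap_page() called from the matching unmap hook.
 */
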
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t dma_addr;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
	__free_iova(iovad, iova);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

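/*
 * Worked example of the boundary-mask padding above (illustrative
 * numbers): with a 64K segment boundary (mask = 0xffff) and a 4K IOVA
 * granule, if 48K of IOVA-aligned segments have been accumulated and the
 * next segment is 32K long, pad_len works out to 16K; padding the
 * previous segment by that much means the 32K segment starts exactly on
 * a 64K boundary and so cannot straddle one within the single IOVA
 * allocation.
 */
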
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}