/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc.
 * the pci and vio busses
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle);

extern unsigned long get_dma_direct_offset(struct device *dev);

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)			((void)0)
#define __dma_sync(addr, size, rw)			((void)0)
#define __dma_sync_page(pg, off, sz, rw)		((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;

	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction direction,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction direction,
			 struct dma_attrs *attrs);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 dma_mask);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev,
			   dma_addr_t dma_address, size_t size,
			   enum dma_data_direction direction,
			   struct dma_attrs *attrs);
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
	void (*sync_single_range_for_cpu)(struct device *hwdev,
					  dma_addr_t dma_handle, unsigned long offset,
					  size_t size,
					  enum dma_data_direction direction);
	void (*sync_single_range_for_device)(struct device *hwdev,
					     dma_addr_t dma_handle, unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction);
	void (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
	void (*sync_sg_for_device)(struct device *hwdev,
				   struct scatterlist *sg, int nelems,
				   enum dma_data_direction direction);
#endif
};

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

/*
 * map_/unmap_single actually call through to map/unmap_page now that all the
 * dma_mapping_ops have been converted over. We just have to get the page and
 * offset to pass through to map_page
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

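/*
 * Example (sketch): allocating and freeing a small coherent descriptor
 * ring; "dev" is a hypothetical struct device pointer.
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_bus,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... use the ring; the device sees it at ring_bus ...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_bus);
 */
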
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

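/*
 * Example (sketch): mapping a scatterlist and programming the resulting
 * hardware segments; "dev", "sgl", "nents" and program_hw_segment() are
 * hypothetical.
 *
 *	struct scatterlist *sg;
 *	int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (!count)
 *		return -EIO;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_segment(sg_dma_address(sg), sg_dma_len(sg));
 *	... after the transfer completes ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
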
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
						   size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(dev, dma_handle, 0,
						      size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
						   offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
						      size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}
#endif /* CONFIG_PPC_NEED_DMA_SYNC_OPS */

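/*
 * Example (sketch): on non-coherent platforms a driver must hand a
 * streaming buffer back and forth around CPU accesses; "dev", "handle"
 * and "len" are hypothetical.  On cache-coherent hardware the calls
 * above compile to no-ops.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
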
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

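/*
 * Example (sketch): every streaming mapping should be checked before
 * use; "dev", "buf" and "len" are hypothetical.
 *
 *	dma_addr_t mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, mapping))
 *		return -ENOMEM;
 */
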
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
	struct dev_archdata *sd = &dev->archdata;

	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
		return false;
#endif

	if (!dev->dma_mask)
		return false;

	return addr + size <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_direct_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_direct_offset(dev);
}

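/*
 * Worked example (hypothetical numbers): if get_dma_direct_offset()
 * returns 0x80000000 for a device, physical address 0x1000 appears to
 * that device as bus address 0x80001000, and dma_to_phys() undoes the
 * translation: 0x80001000 - 0x80000000 == 0x1000.
 */
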
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */