#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     struct dma_attrs *attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t, struct dma_attrs *attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, struct dma_attrs *attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};
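
/*
 * Illustrative sketch only, not part of the original header: one way an
 * architecture or IOMMU backend might wire a subset of these callbacks
 * into an ops table. The "foo_*" handlers are hypothetical placeholders;
 * the block is compiled out.
 */
#if 0
static struct dma_map_ops foo_dma_ops = {
	.alloc		= foo_dma_alloc,
	.free		= foo_dma_free,
	.map_page	= foo_dma_map_page,
	.unmap_page	= foo_dma_unmap_page,
	.map_sg		= foo_dma_map_sg,
	.unmap_sg	= foo_dma_unmap_sg,
	.mapping_error	= foo_dma_mapping_error,
	.dma_supported	= foo_dma_supported,
};
#endif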

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
#include <asm-generic/dma-mapping-broken.h>
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
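
/*
 * Illustrative sketch only, not part of the original header: a typical
 * driver probe path sets the streaming and coherent masks together and
 * falls back to 32-bit addressing. "foo_probe" and "pdev" are hypothetical
 * names used purely for this example; the block is compiled out.
 */
#if 0
static int foo_probe(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;

	/* Prefer 64-bit DMA addressing, fall back to 32-bit. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	return 0;
}
#endif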

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	} else
		return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	} else
		return -EIO;
}
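
/*
 * Illustrative sketch only, not part of the original header: a bus or
 * driver that knows its hardware's scatter-gather limits can publish them
 * via these helpers, assuming dev->dma_parms has already been allocated by
 * the bus code. "foo_dev" and the chosen limits are hypothetical; the block
 * is compiled out.
 */
#if 0
static void foo_set_dma_limits(struct device *foo_dev)
{
	/* Segments of at most 64 KiB, never crossing a 4 GiB boundary. */
	dma_set_max_seg_size(foo_dev, SZ_64K);
	dma_set_seg_boundary(foo_dev, DMA_BIT_MASK(32));
}
#endif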

static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}
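
/*
 * Illustrative sketch only, not part of the original header: allocating a
 * zeroed, coherent descriptor ring and freeing it again. "foo_dev", "ring"
 * and the 4 KiB size are hypothetical; the block is compiled out.
 */
#if 0
static int foo_alloc_ring(struct device *foo_dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* CPU virtual address in "ring", device-visible address in "ring_dma". */
	ring = dma_zalloc_coherent(foo_dev, SZ_4K, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the hardware and use the ring ... */

	dma_free_coherent(foo_dev, SZ_4K, ring, ring_dma);
	return 0;
}
#endif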

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif

extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, gfp_t gfp)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */

#ifndef CONFIG_HAVE_DMA_ATTRS
struct dma_attrs;

#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
	dma_map_single(dev, cpu_addr, size, dir)

#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
	dma_unmap_single(dev, dma_addr, size, dir)

#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
	dma_map_sg(dev, sgl, nents, dir)

#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
	dma_unmap_sg(dev, sgl, nents, dir)

#else
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_addr)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}

static inline int dma_mmap_writecombine(struct device *dev,
					struct vm_area_struct *vma,
					void *cpu_addr, dma_addr_t dma_addr,
					size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
#endif /* CONFIG_HAVE_DMA_ATTRS */

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
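
/*
 * Illustrative sketch only, not part of the original header: a driver keeps
 * unmap state in its private structure with these macros, so the fields and
 * the stores into them compile away when CONFIG_NEED_DMA_MAP_STATE is not
 * set. "foo_buf" and its members are hypothetical; the block is compiled out.
 */
#if 0
struct foo_buf {
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

static void foo_save_mapping(struct foo_buf *buf, dma_addr_t handle,
			     size_t size)
{
	dma_unmap_addr_set(buf, mapping, handle);
	dma_unmap_len_set(buf, len, size);
}

static void foo_unmap_buf(struct device *dev, struct foo_buf *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
}
#endif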

#endif /* _LINUX_DMA_MAPPING_H */