ARM: dma-mapping: use asm-generic/dma-mapping-common.h
arch/arm/include/asm/dma-mapping.h
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#define DMA_ERROR_CODE	(~0)
extern struct dma_map_ops arm_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}
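
/*
 * Usage sketch (illustrative only, not part of this header): bus or
 * platform code may install custom DMA mapping operations for a device
 * before any driver maps buffers, e.g. from an IOMMU attach hook.  The
 * "my_iommu_dma_ops" structure and "my_bus_setup_dma" helper below are
 * hypothetical.
 *
 *	extern struct dma_map_ops my_iommu_dma_ops;
 *
 *	static void my_bus_setup_dma(struct device *dev)
 *	{
 *		set_dma_ops(dev, &my_iommu_dma_ops);
 *	}
 */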

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
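
/*
 * Usage sketch (illustrative only): a driver normally negotiates its DMA
 * mask during probe; "pdev" stands for whatever device is being probed.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */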

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses.  They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change.  Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}

extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);
/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}
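
/*
 * Usage sketch (illustrative only): streaming mappings must be checked
 * with dma_mapping_error() before the returned handle is used.  "buf"
 * and "len" are hypothetical driver state.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */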

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * are no longer valid once this call begins executing.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
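
/*
 * Usage sketch (illustrative only): typical pairing of the two calls
 * above for a small descriptor ring.  The size and error handling are
 * assumptions, not requirements of the API.
 *
 *	void *cpu;
 *	dma_addr_t handle;
 *
 *	cpu = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu, handle);
 */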

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
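
/*
 * Usage sketch (illustrative only): a driver can back its mmap() file
 * operation with a buffer it previously obtained from
 * dma_alloc_coherent().  "my_dev", "my_cpu_addr", "my_handle" and
 * "my_size" are hypothetical driver-private state.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
 *					 my_handle, my_size);
 *	}
 */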

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
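
/*
 * Usage sketch (illustrative only): writecombining memory is typically
 * used for frame buffers that the CPU fills and the hardware scans out.
 * "fb_virt", "fb_size" and "fb_dma" are hypothetical.
 *
 *	fb_virt = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb_virt)
 *		return -ENOMEM;
 *
 * The driver's mmap hook would then hand the region to user space with
 * dma_mmap_writecombine(dev, vma, fb_virt, fb_dma, fb_size).
 */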

/*
 * This can be called during boot to increase the size of the consistent
 * DMA region above its default value of 2MB.  It must be called before the
 * memory allocator is initialised, i.e. before any core_initcall.
 */
extern void __init init_consistent_dma_size(unsigned long size);
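
/*
 * Usage sketch (illustrative only): a machine description needing a
 * larger consistent region would call this from an early init hook,
 * before core_initcalls run.  "my_machine_reserve" is hypothetical.
 *
 *	static void __init my_machine_reserve(void)
 *	{
 *		init_consistent_dma_size(SZ_8M);
 *	}
 */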

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
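
/*
 * Usage sketch (illustrative only): platform code for a bus with a
 * limited DMA window might register a device like this.  The 64MB
 * limit, the pool sizes and the "my_needs_bounce" predicate are
 * hypothetical.
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	ret = dmabounce_register_dev(dev, SZ_2K, SZ_64K, my_needs_bounce);
 */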

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
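
/*
 * Usage sketch (illustrative only): drivers do not call the arm_dma_*
 * helpers above directly; they are reached through the generic API
 * provided by <asm-generic/dma-mapping-common.h>, e.g.
 *
 *	int nents = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);
 *	if (nents == 0)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sgl, count, DMA_TO_DEVICE);
 */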

#endif /* __KERNEL__ */
#endif