arch/arm/include/asm/dma-mapping.h
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, unwrite-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h
 *
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	return dev->dma_mask && *dev->dma_mask != 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

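/*
 * Illustrative sketch only (not part of this header): a hypothetical
 * driver whose device can only drive the low 24 address bits would
 * restrict its DMA mask at probe time roughly as follows.
 */
#if 0	/* example usage, not compiled */
static int example_probe(struct device *dev)
{
	/* 0x00ffffff: only the low 24 bits are usable for bus mastering */
	if (dma_set_mask(dev, 0x00ffffff))
		return -EIO;	/* mask cannot be satisfied */

	/* ... continue with device setup ... */
	return 0;
}
#endif
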
static inline int dma_get_cache_alignment(void)
{
	return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
	return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

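/*
 * Illustrative sketch only (not part of this header): allocating and
 * releasing a small coherent descriptor ring for a hypothetical device.
 * The structure and names below are made up for the example.
 */
#if 0	/* example usage, not compiled */
struct example_ring {
	void		*desc;		/* CPU-view address */
	dma_addr_t	desc_dma;	/* device-view address */
	size_t		bytes;
};

static int example_ring_alloc(struct device *dev, struct example_ring *ring)
{
	ring->bytes = 4096;
	ring->desc = dma_alloc_coherent(dev, ring->bytes,
					&ring->desc_dma, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;
	return 0;
}

static void example_ring_free(struct device *dev, struct example_ring *ring)
{
	dma_free_coherent(dev, ring->bytes, ring->desc, ring->desc_dma);
}
#endif
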
/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);

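/*
 * Illustrative sketch only (not part of this header): exposing a
 * previously allocated coherent buffer to user space from a character
 * device's ->mmap() handler.  "struct example_dev" and its fields are
 * hypothetical.
 */
#if 0	/* example usage, not compiled */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_dev *edev = file->private_data;

	/* edev->buf / edev->buf_dma came from dma_alloc_coherent() */
	return dma_mmap_coherent(edev->dev, vma, edev->buf,
				 edev->buf_dma, edev->buf_size);
}
#endif
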
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);

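/*
 * Illustrative sketch only (not part of this header): write-combined
 * memory suits buffers the CPU mostly writes, such as a frame buffer.
 * The helper name and size are hypothetical.
 */
#if 0	/* example usage, not compiled */
static void *example_alloc_fb(struct device *dev, size_t fb_size,
			      dma_addr_t *fb_dma)
{
	return dma_alloc_writecombine(dev, fb_size, fb_dma, GFP_KERNEL);
}
#endif
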
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);

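/*
 * Illustrative sketch only (not part of this header): platform code for a
 * bus with a limited DMA window registering each discovered device with
 * the dmabounce layer.  The pool sizes are arbitrary example values.
 */
#if 0	/* example usage, not compiled */
static int example_platform_add_device(struct device *dev)
{
	/* 512-byte small-buffer pool, 4 KiB large-buffer pool */
	return dmabounce_register_dev(dev, 512, 4096);
}
#endif
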
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);

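/*
 * Illustrative sketch only (not part of this header): a platform whose
 * inbound PCI window covers only the first 64MB of RAM could implement
 * dma_needs_bounce() roughly along these lines.  The window limit is a
 * made-up example value.
 */
#if 0	/* example implementation, not compiled */
#define EXAMPLE_DMA_WINDOW_END	(64 * 1024 * 1024)

int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	/* bounce if any part of the buffer falls outside the DMA window */
	return (addr + size) > EXAMPLE_DMA_WINDOW_END;
}
#endif
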
/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}


/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}

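/*
 * Illustrative sketch only (not part of this header): streaming DMA for a
 * single buffer that a hypothetical device will read from.  Note the
 * dma_mapping_error() check before the handle is handed to hardware.
 */
#if 0	/* example usage, not compiled */
static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... tell the device to fetch 'len' bytes from 'handle' ... */

	/* once the transfer has completed, give the buffer back to the CPU */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif
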
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint(page_address(page) + offset, size, dir);

	return page_to_dma(dev, page) + offset;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}

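/*
 * Illustrative sketch only (not part of this header): the CPU/device
 * ownership handshake on a long-lived streaming mapping.  The processing
 * helper and the calling context are hypothetical.
 */
#if 0	/* example usage, not compiled */
static void example_rx_poll(struct device *dev, dma_addr_t handle,
			    void *buf, size_t len)
{
	/* hand the buffer to the CPU so the received data can be read */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	example_process_packet(buf, len);	/* hypothetical helper */

	/* give the same mapping back to the device for the next receive */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
#endif
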
/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);

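/*
 * Illustrative sketch only (not part of this header): mapping a small
 * scatterlist for a device read.  dma_map_sg() may coalesce entries, so
 * the returned count is what should be programmed into the hardware,
 * while dma_unmap_sg() must still be passed the original nents.
 */
#if 0	/* example usage, not compiled */
static int example_map_two_buffers(struct device *dev, void *a, void *b,
				   size_t len)
{
	struct scatterlist sg[2];
	int count;

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], a, len);
	sg_set_buf(&sg[1], b, len);

	count = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;

	/* ... program 'count' descriptors from sg[] into the device ... */

	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
	return 0;
}
#endif
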
#endif /* __KERNEL__ */
#endif