#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mm.h>		/* need struct page */

#include <asm/scatterlist.h>

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached memory with the write buffer disabled, suitable for use
 * with DMA devices.  This is the "generic" version; the PCI-specific
 * version is in pci.h.
 */
extern void consistent_sync(void *kaddr, size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24
 * bits during bus mastering, then you would pass 0x00ffffff as the
 * mask to this function.
 *
 * FIXME: This should really be a platform-specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied mask.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	return dev->dma_mask && *dev->dma_mask != 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
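
/*
 * Example (illustrative sketch only, not part of this header): a driver
 * probe routine would typically negotiate its DMA mask before making
 * any mapping calls.  "mydev" is a hypothetical device pointer.
 *
 *	if (dma_set_mask(mydev, 0xffffffffULL))
 *		return -EIO;	- device cannot address our RAM
 */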

static inline int dma_get_cache_alignment(void)
{
	return 32;
}

static inline int dma_is_consistent(dma_addr_t handle)
{
	return 0;
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
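
/*
 * Example (illustrative, not part of this header): streaming mappings
 * should be checked before handing the address to hardware.  "buf" and
 * "len" are hypothetical.
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dma))
 *		return -ENOMEM;
 */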

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with
 * cpu_addr/handle are illegal once this call has begun executing.
 */
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle);
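
/*
 * Example (illustrative sketch, not part of this header): a typical
 * coherent allocation for a hypothetical descriptor ring.  "ring",
 * "ring_dma" and RING_SIZE are made-up names.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... tell the device about ring_dma, access ring from the CPU ...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */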

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t handle, size_t size);
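
/*
 * Example (illustrative, not part of this header): a driver's mmap
 * file operation can hand a coherent buffer to user space.  "struct
 * mydrv" and its fields are hypothetical.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->handle, priv->size);
 *	}
 */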

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, write-buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t handle, size_t size);

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
{
	consistent_sync(cpu_addr, size, dir);
	return virt_to_dma(dev, (unsigned long)cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
#endif
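
/*
 * Example (illustrative sketch, not part of this header): a typical
 * transmit path maps a buffer, lets the device read it, then unmaps.
 * "txbuf" and "len" are hypothetical.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, txbuf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dma))
 *		return -ENOMEM;
 *	... point the device at dma and start the transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */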

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer that was mapped
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
		 enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer that was mapped
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
	       enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * dma_map_single interface above.  Here the scatter-gather list
 * elements are each tagged with the appropriate DMA address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements
 * (for example via virtual mapping capabilities).
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single are
 * the same here.
 */
#ifndef CONFIG_DMABOUNCE
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt;

		sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
		virt = page_address(sg->page) + sg->offset;
		consistent_sync(virt, sg->length, dir);
	}

	return nents;
}
#else
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
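
/*
 * Example (illustrative, not part of this header): after mapping, a
 * driver walks the list using the count actually returned.  "sglist",
 * "nbufs" and setup_desc() are hypothetical.
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nbufs, DMA_FROM_DEVICE);
 *	for (i = 0; i < count; i++)
 *		setup_desc(i, sg_dma_address(&sglist[i]),
 *			   sg_dma_len(&sglist[i]));
 *	... run the transfer ...
 *	dma_unmap_sg(dev, sglist, nbufs, DMA_FROM_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, not the count
 * returned by dma_map_sg().
 */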

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers that were mapped
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

/**
 * dma_sync_single_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to sync
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA
 * mapping, you must call this function before doing so.  Before
 * you next hand the DMA address back to the card, you must first
 * call dma_sync_single_for_device(); the device then owns the
 * buffer again.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
{
	consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
			   enum dma_data_direction dir)
{
	consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}
#else
extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif
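
/*
 * Example (illustrative, not part of this header): peeking at a mapped
 * receive buffer between transfers.  "dma", "rxbuf" and "len" are
 * hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now safely read rxbuf ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again ...
 */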

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to sync
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = page_address(sg->page) + sg->offset;
		consistent_sync(virt, sg->length, dir);
	}
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = page_address(sg->page) + sg->offset;
		consistent_sync(virt, sg->length, dir);
	}
}
#else
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
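
/*
 * Example (illustrative, not part of this header): platform setup code
 * might register a bus device with 512-byte and 4K pools; the pool
 * sizes here are made up.
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		printk(KERN_ERR "failed to register with dmabounce\n");
 */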

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a
 * device that was previously registered with dmabounce_register_dev
 * is removed from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);
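
/*
 * Example (illustrative sketch, not part of this header): a platform
 * with a 64MB inbound PCI window starting at the base of RAM might
 * implement the hook like this; the window bounds are made up.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > (PHYS_OFFSET + SZ_64M);
 *	}
 */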
#endif /* CONFIG_DMABOUNCE */

#endif /* __KERNEL__ */
#endif