/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc:
 * the PCI and VIO busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);

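/*
 * Example (hypothetical, not part of this header): the comment above
 * describes an alternative to uncached mappings -- allocate a normal
 * cacheable buffer and keep it consistent by hand with __dma_sync().
 * A minimal sketch; start_device_dma() is a made-up helper:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	(push CPU writes to RAM)
 *	start_device_dma(buf, len);
 *
 * Most drivers should use the generic dma_* wrappers below rather than
 * these __dma_* primitives.
 */
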
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.  Argument order matches the declarations above.
 */

#define __dma_alloc_coherent(size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
};

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
		return NULL;
	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
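
/*
 * Example (hypothetical, not part of this header): platform or bus code
 * can provide its own dma_mapping_ops and attach them with set_dma_ops();
 * the dma_* wrappers below then dispatch through dev->archdata.dma_ops.
 * All mybus_* names are made up for illustration:
 *
 *	static struct dma_mapping_ops mybus_dma_ops = {
 *		.alloc_coherent	= mybus_alloc_coherent,
 *		.free_coherent	= mybus_free_coherent,
 *		.map_single	= mybus_map_single,
 *		.unmap_single	= mybus_unmap_single,
 *		.map_sg		= mybus_map_sg,
 *		.unmap_sg	= mybus_unmap_sg,
 *	};
 *
 *	set_dma_ops(&mydev->dev, &mybus_dma_ops);
 */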

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

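/*
 * Example (hypothetical driver code, not part of this header): a PCI
 * driver restricting its device to 32-bit DMA at probe time, using
 * DMA_32BIT_MASK from <linux/dma-mapping.h>:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;	(device cannot address our memory)
 */
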
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, page_address(page) + offset, size,
				   direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

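/*
 * Example (hypothetical, not part of this header): the *_attrs variants
 * take optional attributes from <linux/dma-attrs.h>.  A sketch passing
 * DMA_ATTR_WRITE_BARRIER, assuming the underlying dma_mapping_ops
 * honour it:
 *
 *	struct dma_attrs attrs;
 *	dma_addr_t handle;
 *
 *	init_dma_attrs(&attrs);
 *	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 *	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
 *	...
 *	dma_unmap_single_attrs(dev, handle, len, DMA_TO_DEVICE, &attrs);
 */
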
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

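/*
 * Example (hypothetical, not part of this header): allocating a coherent
 * region for a descriptor ring.  RING_BYTES, regs and RING_BASE are made
 * up for illustration:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	out_le32(regs + RING_BASE, ring_dma);	(tell the device)
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
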
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

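/*
 * Example (hypothetical driver code, not part of this header): the usual
 * streaming-DMA pattern -- map, check the handle, let the device run,
 * then unmap.  Note dma_mapping_error() below takes only the handle:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	...		(device DMA in flight; don't touch buf)
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
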
/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
	}

	return nents;
}

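/*
 * Example (hypothetical, not part of this header): mapping a two-entry
 * scatter/gather list built with the <linux/scatterlist.h> helpers;
 * buf0/buf1 and program_device_entry() are made up:
 *
 *	struct scatterlist sg[2];
 *	int i, n;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *	n = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *	for (i = 0; i < n; i++)
 *		program_device_entry(i, sg_dma_address(&sg[i]),
 *				     sg_dma_len(&sg[i]));
 */
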
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	/* We don't do anything here. */
}

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

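/*
 * Example (hypothetical, not part of this header): inspecting a buffer
 * that stays mapped DMA_FROM_DEVICE.  Sync toward the CPU before reading,
 * then back toward the device before it writes again:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	count = le32_to_cpup(buf);	(CPU may read the buffer now)
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
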
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

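/*
 * Example (hypothetical, not part of this header): padding an allocation
 * so unrelated data never shares a cache line with a DMA buffer -- this
 * matters on non-coherent parts where __dma_sync() works on whole lines:
 *
 *	size_t len = ALIGN(payload_len, dma_get_cache_alignment());
 *	void *buf = kmalloc(len, GFP_KERNEL);
 */
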
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */