/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc.
 * the pci and vio busses
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle);

extern unsigned long get_dma_direct_offset(struct device *dev);

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */
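
/*
 * Illustrative sketch, not part of the original header: the comment above
 * notes that instead of __dma_alloc_coherent() a driver may allocate memory
 * "normally" and keep it consistent with the cache management functions.
 * On a non-snooping core that means flushing the buffer before the device
 * reads it; the example_* name and the buffer are hypothetical.
 */
static inline void example_flush_buf_for_device(void *buf, size_t len)
{
	/* push dirty cache lines to memory so the device sees current data */
	__dma_sync(buf, len, (int)DMA_TO_DEVICE);
}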

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction direction,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction direction,
			 struct dma_attrs *attrs);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 dma_mask);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev,
			   dma_addr_t dma_address, size_t size,
			   enum dma_data_direction direction,
			   struct dma_attrs *attrs);
	int (*addr_needs_map)(struct device *dev, dma_addr_t addr,
			      size_t size);
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
	void (*sync_single_range_for_cpu)(struct device *hwdev,
			dma_addr_t dma_handle, unsigned long offset,
			size_t size,
			enum dma_data_direction direction);
	void (*sync_single_range_for_device)(struct device *hwdev,
			dma_addr_t dma_handle, unsigned long offset,
			size_t size,
			enum dma_data_direction direction);
	void (*sync_sg_for_cpu)(struct device *hwdev,
			struct scatterlist *sg, int nelems,
			enum dma_data_direction direction);
	void (*sync_sg_for_device)(struct device *hwdev,
			struct scatterlist *sg, int nelems,
			enum dma_data_direction direction);
#endif
};

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

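/*
 * Minimal sketch, not part of the original header: how platform or bus code
 * could attach one of the generic operation sets declared above to a device.
 * The example_* name is hypothetical; dma_direct_ops is declared earlier in
 * this file.
 */
static inline void example_use_direct_ops(struct device *dev)
{
	/* route every DMA API call for this device through dma_direct_ops */
	set_dma_ops(dev, &dma_direct_ops);
}
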
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

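/*
 * Minimal sketch, not part of the original header, of the usual probe-time
 * negotiation built on dma_set_mask() above: ask for 64-bit addressing and
 * fall back to 32-bit. DMA_BIT_MASK() is assumed to come from
 * <linux/dma-mapping.h>; the example_* name is hypothetical.
 */
static inline int example_pick_dma_mask(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
		return 0;	/* device can use 64-bit bus addresses */
	return dma_set_mask(dev, DMA_BIT_MASK(32));
}
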
/*
 * map_/unmap_single actually call through to map/unmap_page now that all the
 * dma_mapping_ops have been converted over. We just have to get the page and
 * offset to pass through to map_page
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					   dma_addr_t dma_addr,
					   size_t size,
					   enum dma_data_direction direction,
					   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

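/*
 * Minimal usage sketch, not part of the original header: a streaming
 * scatter/gather transfer built from the wrappers above. The scatterlist is
 * assumed to be initialised by the (hypothetical) caller; note that
 * dma_unmap_sg() takes the original nents, not the count map_sg returned.
 */
static inline int example_sg_transfer(struct device *dev,
				      struct scatterlist *sgl, int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (mapped == 0)
		return -EIO;	/* the bus dma_ops could not map the list */

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
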
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
						   size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(dev, dma_handle,
						      0, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
						   offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
						      size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}
#endif

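/*
 * Minimal sketch, not part of the original header: how a driver would use
 * the sync hooks above to look at a streaming DMA_FROM_DEVICE buffer while
 * it stays mapped. The handle, length and example_* name are hypothetical.
 */
static inline void example_peek_rx_buffer(struct device *dev,
					   dma_addr_t handle, size_t len)
{
	/* give the buffer back to the CPU before reading it */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect the CPU-side buffer here ... */

	/* hand it back to the device before the next transfer */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
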
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size))
		return 0;

	if (!dev->dma_mask)
		return 0;

	return addr + size <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_direct_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_direct_offset(dev);
}

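/*
 * Worked example (illustrative numbers only, not from the original header):
 * on a platform where get_dma_direct_offset() returns 0x80000000,
 * phys_to_dma() maps physical address 0x1000 to bus address 0x80001000, and
 * dma_to_phys() applies the inverse, subtracting the same offset.
 */
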
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */