/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>

#include <asm/cache.h>
#include <asm/io.h>		/* for phys_to_virt() */

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}
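
/*
 * For example (illustrative only), on a kernel with both CONFIG_ZONE_DMA
 * and CONFIG_ZONE_DMA32 enabled:
 *
 *	coherent_dma_mask == DMA_BIT_MASK(24)  ->  gfp gains __GFP_DMA
 *	coherent_dma_mask == DMA_BIT_MASK(32)  ->  gfp gains __GFP_DMA32
 *	coherent_dma_mask == DMA_BIT_MASK(64)  ->  no zone flag added
 */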

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
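
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file; "pdev" and RING_BYTES are assumed names):
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *				  GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	...program ring_dma into the device; the CPU can access "ring"
 *	   directly without further sync calls...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 */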

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
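
/*
 * Write back and/or invalidate the CPU cache lines covering
 * [addr, addr + size) as required by the transfer direction, so that the
 * device and the CPU see consistent data on non-coherent hardware.
 */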
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);
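
/*
 * A streaming DMA sketch (hypothetical driver code; "pdev", "buf" and
 * BUF_BYTES are assumed names):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(&pdev->dev, buf, BUF_BYTES, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -EIO;
 *	...hand "handle" to the device and wait for completion...
 *	dma_unmap_single(&pdev->dev, handle, BUF_BYTES, DMA_TO_DEVICE);
 */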

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
				(void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		__dma_sync(addr, size, direction);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);
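
/*
 * Scatter-gather sketch (hypothetical driver code; "sgl" and "count" are
 * assumed to come from the caller):
 *
 *	int i, mapped;
 *
 *	mapped = dma_map_sg(&pdev->dev, sgl, count, DMA_FROM_DEVICE);
 *	for (i = 0; i < mapped; i++)
 *		...program sg_dma_address(&sgl[i]) and sg_dma_len(&sgl[i])
 *		   into the device...
 *	dma_unmap_sg(&pdev->dev, sgl, count, DMA_FROM_DEVICE);
 */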

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);
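
/*
 * Illustrative reuse of a long-lived streaming mapping (hypothetical
 * driver code, with "handle" and BUF_BYTES as in the dma_map_single()
 * sketch above):
 *
 *	dma_sync_single_for_cpu(&pdev->dev, handle, BUF_BYTES,
 *				DMA_FROM_DEVICE);
 *	...the CPU may now read the freshly DMA'd data...
 *	dma_sync_single_for_device(&pdev->dev, handle, BUF_BYTES,
 *				   DMA_FROM_DEVICE);
 *	...the device may DMA into the buffer again...
 */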

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);