/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>
/*
 * A note on terminology: Linux calls an uncached memory area "coherent";
 * MIPS terminology reserves "coherent" for memory whose coherency is
 * maintained by hardware.
 */
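
/*
 * Editorial illustration: on a typical 32-bit MIPS system the same
 * physical page is visible twice in the kernel address space - cached
 * through KSEG0 (0x80000000) and uncached through KSEG1 (0xa0000000).
 * UNCAC_ADDR()/CAC_ADDR(), used below, translate a kernel virtual
 * address between those two views, which is how dma_alloc_coherent()
 * hands out an "uncached" alias of ordinary memory.
 */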

void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, int gfp)
{
        void *ret;
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = virt_to_phys(ret);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
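
/*
 * Usage sketch (editorial, not part of the original file; the foo_*
 * names are hypothetical): noncoherent memory is cached, so the caller
 * must flush CPU writes with dma_cache_sync() before the device reads
 * the buffer.
 */
#if 0
static void *foo_setup_buffer(struct device *dev, dma_addr_t *handle)
{
        void *buf = dma_alloc_noncoherent(dev, PAGE_SIZE, handle, GFP_KERNEL);

        if (buf != NULL) {
                /* ... fill in buf ... */
                /* push the CPU writes to memory before starting the device */
                dma_cache_sync(buf, PAGE_SIZE, DMA_TO_DEVICE);
        }
        return buf;
}
#endif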

void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, int gfp)
{
        void *ret;

        ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
        if (ret) {
                dma_cache_wback_inv((unsigned long) ret, size);
                ret = UNCAC_ADDR(ret);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
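
/*
 * Usage sketch (editorial; foo_* names are hypothetical): a driver
 * allocating a descriptor ring that CPU and device share with no
 * explicit synchronization.  The pointer returned to the CPU is the
 * uncached alias, so stores are visible to the device immediately.
 */
#if 0
static int foo_alloc_ring(struct device *dev, void **ring,
        dma_addr_t *ring_dma)
{
        *ring = dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
        if (*ring == NULL)
                return -ENOMEM;

        /* give *ring_dma to the device, use *ring from the CPU side */
        return 0;
}
#endif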

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        addr = CAC_ADDR(addr);
        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}
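
/*
 * Editorial note on the cases above: this is the standard mapping from
 * DMA direction to cache operation on a write-back cache.  For
 * DMA_TO_DEVICE dirty lines only need to be written back so the device
 * reads current data; for DMA_FROM_DEVICE the lines only need to be
 * invalidated so the CPU re-reads what the device wrote; and
 * DMA_BIDIRECTIONAL needs both.
 */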

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        /* the per-direction switch here was identical to __dma_sync() */
        __dma_sync(addr, size, direction);

        return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);
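
/*
 * Usage sketch (editorial; foo_send is hypothetical): the streaming API
 * for a one-off transfer.  Between map and unmap the buffer belongs to
 * the device, and the CPU must not touch it.
 */
#if 0
static void foo_send(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        /* ... hand "handle" to the device and wait for completion ... */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}
#endif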

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        /*
         * The cache was already dealt with in dma_map_single(); this
         * implementation keeps no per-mapping state to tear down.
         */
        BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) page_address(sg->page);
                if (addr)
                        __dma_sync(addr + sg->offset, sg->length, direction);
                sg->dma_address = (dma_addr_t)
                        (page_to_phys(sg->page) + sg->offset);
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);
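
/*
 * Usage sketch (editorial; foo_* names hypothetical): mapping a
 * scatterlist.  Portable drivers must program the device from the
 * dma_address/length pairs and use the count dma_map_sg() returns,
 * since some platforms merge entries.
 */
#if 0
static int foo_map_request(struct device *dev, struct scatterlist *sg,
        int nents)
{
        int count, i;

        count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
        for (i = 0; i < count; i++) {
                /* program sg[i].dma_address and sg[i].length ... */
        }
        return count;
}
#endif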

dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = (unsigned long) page_address(page) + offset;
        dma_cache_wback_inv(addr, size);

        return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);
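
/*
 * Editorial note: dma_map_page() is the page/offset flavour of
 * dma_map_single(), for callers (block I/O, networking) that hold a
 * struct page rather than a kernel virtual address.  It flushes
 * conservatively with wback_inv regardless of direction.
 */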

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = dma_address + PAGE_OFFSET;
                dma_cache_wback_inv(addr, size);
        }
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        if (direction == DMA_TO_DEVICE)
                return;

        for (i = 0; i < nhwentries; i++, sg++) {
                addr = (unsigned long) page_address(sg->page);
                if (!addr)
                        continue;
                dma_cache_wback_inv(addr + sg->offset, sg->length);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);
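
/*
 * Usage sketch (editorial; foo_rx_poll is hypothetical): re-using one
 * streaming mapping for many transfers.  Ownership ping-pongs between
 * CPU and device through the sync calls instead of unmapping and
 * remapping each time.
 */
#if 0
static void foo_rx_poll(struct device *dev, dma_addr_t handle,
        void *buf, size_t len)
{
        /* give the buffer to the CPU to inspect the received data */
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
        /* ... process buf ... */
        /* hand it back to the device for the next transfer */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
#endif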

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + offset + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + offset + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* sync the same span that dma_map_sg() mapped, offset included */
        for (i = 0; i < nelems; i++, sg++)
                __dma_sync((unsigned long) page_address(sg->page) + sg->offset,
                        sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* sync the same span that dma_map_sg() mapped, offset included */
        for (i = 0; i < nelems; i++, sg++)
                __dma_sync((unsigned long) page_address(sg->page) + sg->offset,
                        sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
        /* DMA addresses are plain physical addresses here; mapping never fails. */
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);
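
/*
 * Usage sketch (editorial; foo_map_checked is hypothetical): portable
 * drivers still check every mapping.  It can never fail here, but the
 * same code must work on platforms with IOMMUs where it can.
 */
#if 0
static int foo_map_checked(struct device *dev, void *buf, size_t len,
        dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(*handle))
                return -ENOMEM;
        return 0;
}
#endif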

int dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
         * guarantee allocations that must be within a tighter range than
         * GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);
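
/*
 * Usage sketch (editorial): dma_supported() is normally reached via
 * dma_set_mask(), which a driver calls at probe time to declare how
 * many address bits its device can drive.
 */
#if 0
        if (dma_set_mask(dev, 0xffffffffULL))  /* device does 32-bit DMA */
                return -EIO;
#endif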

int dma_is_consistent(dma_addr_t dma_addr)
{
        return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
        if (direction == DMA_NONE)
                return;

        dma_cache_wback_inv((unsigned long) vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);

/* The DAC routines are a PCIism. */

#ifdef CONFIG_PCI

#include <linux/pci.h>

dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
        struct page *page, unsigned long offset, int direction)
{
        return (dma64_addr_t) page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);
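
/*
 * Editorial note: DAC (Dual Address Cycle) lets a 32-bit PCI bus carry
 * a 64-bit address in two bus cycles.  Since this platform maps DMA
 * addresses one-to-one onto physical addresses, a DAC "mapping" is pure
 * arithmetic and there is no state to tear down.  Sketch (foo_dac_map
 * is hypothetical):
 */
#if 0
static dma64_addr_t foo_dac_map(struct pci_dev *pdev, struct page *page,
        unsigned long offset)
{
        return pci_dac_page_to_dma(pdev, page, offset, PCI_DMA_TODEVICE);
}
#endif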

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
        dma64_addr_t dma_addr)
{
        return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
        dma64_addr_t dma_addr)
{
        return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
        dma64_addr_t dma_addr, size_t len, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);

        dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
        dma64_addr_t dma_addr, size_t len, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);

        dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);

#endif /* CONFIG_PCI */