Commit | Line | Data |
---|---|---|
1394f032 | 1 | /* |
dd3dd384 | 2 | * bfin_dma_5xx.c - Blackfin DMA implementation |
1394f032 | 3 | * |
9c417a43 | 4 | * Copyright 2004-2008 Analog Devices Inc. |
dd3dd384 | 5 | * Licensed under the GPL-2 or later. |
1394f032 BW |
6 | */ |
7 | ||
8 | #include <linux/errno.h> | |
dd3dd384 MF |
9 | #include <linux/interrupt.h> |
10 | #include <linux/kernel.h> | |
1394f032 | 11 | #include <linux/module.h> |
dd3dd384 | 12 | #include <linux/param.h> |
d642a8ad | 13 | #include <linux/proc_fs.h> |
1394f032 | 14 | #include <linux/sched.h> |
d642a8ad | 15 | #include <linux/seq_file.h> |
dd3dd384 | 16 | #include <linux/spinlock.h> |
1394f032 | 17 | |
24a07a12 | 18 | #include <asm/blackfin.h> |
1394f032 | 19 | #include <asm/cacheflush.h> |
dd3dd384 MF |
20 | #include <asm/dma.h> |
21 | #include <asm/uaccess.h> | |
1394f032 | 22 | |
/* Per-channel bookkeeping: status, MMR base, owner id, IRQ, lock.
 * Indexed directly by DMA channel number.
 */
struct dma_channel dma_ch[MAX_DMA_CHANNELS];
EXPORT_SYMBOL(dma_ch);
1394f032 | 25 | |
a161bb05 | 26 | static int __init blackfin_dma_init(void) |
1394f032 BW |
27 | { |
28 | int i; | |
29 | ||
30 | printk(KERN_INFO "Blackfin DMA Controller\n"); | |
31 | ||
211daf9d | 32 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
1394f032 | 33 | dma_ch[i].chan_status = DMA_CHANNEL_FREE; |
77955664 | 34 | dma_ch[i].regs = dma_io_base_addr[i]; |
1394f032 BW |
35 | mutex_init(&(dma_ch[i].dmalock)); |
36 | } | |
23ee968d | 37 | /* Mark MEMDMA Channel 0 as requested since we're using it internally */ |
d642a8ad GY |
38 | request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy"); |
39 | request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy"); | |
a924db7c MH |
40 | |
41 | #if defined(CONFIG_DEB_DMA_URGENT) | |
42 | bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE() | |
43 | | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT); | |
44 | #endif | |
d642a8ad | 45 | |
1394f032 BW |
46 | return 0; |
47 | } | |
1394f032 BW |
48 | arch_initcall(blackfin_dma_init); |
49 | ||
d642a8ad | 50 | #ifdef CONFIG_PROC_FS |
d642a8ad GY |
51 | static int proc_dma_show(struct seq_file *m, void *v) |
52 | { | |
53 | int i; | |
54 | ||
dd3dd384 | 55 | for (i = 0; i < MAX_DMA_CHANNELS; ++i) |
d642a8ad GY |
56 | if (dma_ch[i].chan_status != DMA_CHANNEL_FREE) |
57 | seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id); | |
58 | ||
59 | return 0; | |
60 | } | |
61 | ||
62 | static int proc_dma_open(struct inode *inode, struct file *file) | |
63 | { | |
64 | return single_open(file, proc_dma_show, NULL); | |
65 | } | |
66 | ||
/* Read-only /proc/dma file ops, backed by the seq_file helpers. */
static const struct file_operations proc_dma_operations = {
	.open		= proc_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
73 | ||
74 | static int __init proc_dma_init(void) | |
75 | { | |
76 | return proc_create("dma", 0, NULL, &proc_dma_operations) != NULL; | |
77 | } | |
78 | late_initcall(proc_dma_init); | |
79 | #endif | |
80 | ||
9c417a43 MF |
81 | /** |
82 | * request_dma - request a DMA channel | |
83 | * | |
84 | * Request the specific DMA channel from the system if it's available. | |
85 | */ | |
99532fd2 | 86 | int request_dma(unsigned int channel, const char *device_id) |
1394f032 | 87 | { |
1394f032 | 88 | pr_debug("request_dma() : BEGIN \n"); |
d642a8ad GY |
89 | |
90 | if (device_id == NULL) | |
91 | printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel); | |
5ce998cf MH |
92 | |
93 | #if defined(CONFIG_BF561) && ANOMALY_05000182 | |
94 | if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) { | |
95 | if (get_cclk() > 500000000) { | |
96 | printk(KERN_WARNING | |
97 | "Request IMDMA failed due to ANOMALY 05000182\n"); | |
98 | return -EFAULT; | |
99 | } | |
100 | } | |
101 | #endif | |
102 | ||
1394f032 BW |
103 | mutex_lock(&(dma_ch[channel].dmalock)); |
104 | ||
105 | if ((dma_ch[channel].chan_status == DMA_CHANNEL_REQUESTED) | |
106 | || (dma_ch[channel].chan_status == DMA_CHANNEL_ENABLED)) { | |
107 | mutex_unlock(&(dma_ch[channel].dmalock)); | |
108 | pr_debug("DMA CHANNEL IN USE \n"); | |
109 | return -EBUSY; | |
110 | } else { | |
111 | dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED; | |
112 | pr_debug("DMA CHANNEL IS ALLOCATED \n"); | |
113 | } | |
114 | ||
115 | mutex_unlock(&(dma_ch[channel].dmalock)); | |
116 | ||
8b01eaff | 117 | #ifdef CONFIG_BF54x |
549aaa84 | 118 | if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) { |
ab2375f2 SZ |
119 | unsigned int per_map; |
120 | per_map = dma_ch[channel].regs->peripheral_map & 0xFFF; | |
121 | if (strncmp(device_id, "BFIN_UART", 9) == 0) | |
122 | dma_ch[channel].regs->peripheral_map = per_map | | |
5be36d22 | 123 | ((channel - CH_UART2_RX + 0xC)<<12); |
ab2375f2 SZ |
124 | else |
125 | dma_ch[channel].regs->peripheral_map = per_map | | |
5be36d22 | 126 | ((channel - CH_UART2_RX + 0x6)<<12); |
549aaa84 | 127 | } |
8b01eaff SZ |
128 | #endif |
129 | ||
1394f032 | 130 | dma_ch[channel].device_id = device_id; |
9b011407 | 131 | dma_ch[channel].irq = 0; |
1394f032 BW |
132 | |
133 | /* This is to be enabled by putting a restriction - | |
134 | * you have to request DMA, before doing any operations on | |
135 | * descriptor/channel | |
136 | */ | |
137 | pr_debug("request_dma() : END \n"); | |
596b565b | 138 | return 0; |
1394f032 BW |
139 | } |
140 | EXPORT_SYMBOL(request_dma); | |
141 | ||
68532bda | 142 | int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data) |
1394f032 | 143 | { |
1394f032 | 144 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE |
211daf9d | 145 | && channel < MAX_DMA_CHANNELS)); |
1394f032 BW |
146 | |
147 | if (callback != NULL) { | |
8f1cc233 MF |
148 | int ret; |
149 | unsigned int irq = channel2irq(channel); | |
1394f032 | 150 | |
8f1cc233 MF |
151 | ret = request_irq(irq, callback, IRQF_DISABLED, |
152 | dma_ch[channel].device_id, data); | |
153 | if (ret) | |
154 | return ret; | |
155 | ||
156 | dma_ch[channel].irq = irq; | |
157 | dma_ch[channel].data = data; | |
1394f032 BW |
158 | } |
159 | return 0; | |
160 | } | |
161 | EXPORT_SYMBOL(set_dma_callback); | |
162 | ||
/**
 * clear_dma_buffer - clear DMA fifos for specified channel
 *
 * Set the Buffer Clear bit in the Configuration register of specific DMA
 * channel. This will stop the descriptor based DMA operation.
 */
static void clear_dma_buffer(unsigned int channel)
{
	dma_ch[channel].regs->cfg |= RESTART;
	SSYNC();	/* ensure the RESTART write reaches the controller before clearing it */
	dma_ch[channel].regs->cfg &= ~RESTART;
}
175 | ||
1394f032 BW |
176 | void free_dma(unsigned int channel) |
177 | { | |
1394f032 BW |
178 | pr_debug("freedma() : BEGIN \n"); |
179 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | |
211daf9d | 180 | && channel < MAX_DMA_CHANNELS)); |
1394f032 BW |
181 | |
182 | /* Halt the DMA */ | |
183 | disable_dma(channel); | |
184 | clear_dma_buffer(channel); | |
185 | ||
9b011407 | 186 | if (dma_ch[channel].irq) |
a2ba8b19 | 187 | free_irq(dma_ch[channel].irq, dma_ch[channel].data); |
1394f032 BW |
188 | |
189 | /* Clear the DMA Variable in the Channel */ | |
190 | mutex_lock(&(dma_ch[channel].dmalock)); | |
191 | dma_ch[channel].chan_status = DMA_CHANNEL_FREE; | |
192 | mutex_unlock(&(dma_ch[channel].dmalock)); | |
193 | ||
194 | pr_debug("freedma() : END \n"); | |
195 | } | |
196 | EXPORT_SYMBOL(free_dma); | |
197 | ||
1efc80b5 | 198 | #ifdef CONFIG_PM |
c9e0020d MF |
199 | # ifndef MAX_DMA_SUSPEND_CHANNELS |
200 | # define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS | |
201 | # endif | |
/*
 * Refuse to suspend while any DMA channel is still actively running;
 * otherwise save each channel's peripheral_map so resume can restore it.
 * Returns 0 on success, -EBUSY if a channel is still enabled.
 */
int blackfin_dma_suspend(void)
{
	int i;

	for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i) {
		if (dma_ch[i].chan_status == DMA_CHANNEL_ENABLED) {
			printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
			return -EBUSY;
		}

		dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
	}

	return 0;
}
217 | ||
218 | void blackfin_dma_resume(void) | |
219 | { | |
220 | int i; | |
c9e0020d | 221 | for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i) |
1efc80b5 MH |
222 | dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map; |
223 | } | |
224 | #endif | |
225 | ||
/**
 * blackfin_dma_early_init - minimal DMA init
 *
 * Setup a few DMA registers so we can safely do DMA transfers early on in
 * the kernel booting process. Really this just means using dma_memcpy().
 */
void __init blackfin_dma_early_init(void)
{
	/* A zero MDMA_S0_CONFIG is what __dma_memcpy() reads as
	 * "no transfer in flight", so clear it before first use.
	 */
	bfin_write_MDMA_S0_CONFIG(0);
}
5f9a3e89 | 236 | |
/**
 * __dma_memcpy - program the MDMA registers
 *
 * Actually program MDMA0 and wait for the transfer to finish. Disable IRQs
 * while programming registers so that everything is fully configured. Wait
 * for DMA to finish with IRQs enabled. If interrupted, the initial DMA_DONE
 * check will make sure we don't clobber any existing transfer.
 */
static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
{
	static DEFINE_SPINLOCK(mdma_lock);
	unsigned long flags;

	spin_lock_irqsave(&mdma_lock, flags);

	/* A non-zero S0_CONFIG means a previous caller left its transfer
	 * running (see the early "return" below); wait it out before
	 * reprogramming any registers.
	 */
	if (bfin_read_MDMA_S0_CONFIG())
		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
			continue;

	if (conf & DMA2D) {
		/* For larger bit sizes, we've already divided down cnt so it
		 * is no longer a multiple of 64k. So we have to break down
		 * the limit here so it is a multiple of the incoming size.
		 * There is no limitation here in terms of total size other
		 * than the hardware though as the bits lost in the shift are
		 * made up by MODIFY (== we can hit the whole address space).
		 * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4
		 */
		u32 shift = abs(dmod) >> 1;
		size_t ycnt = cnt >> (16 - shift);
		cnt = 1 << (16 - shift);
		bfin_write_MDMA_D0_Y_COUNT(ycnt);
		bfin_write_MDMA_S0_Y_COUNT(ycnt);
		bfin_write_MDMA_D0_Y_MODIFY(dmod);
		bfin_write_MDMA_S0_Y_MODIFY(smod);
	}

	bfin_write_MDMA_D0_START_ADDR(daddr);
	bfin_write_MDMA_D0_X_COUNT(cnt);
	bfin_write_MDMA_D0_X_MODIFY(dmod);
	/* Ack any stale done/error bits before kicking off the transfer. */
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	bfin_write_MDMA_S0_START_ADDR(saddr);
	bfin_write_MDMA_S0_X_COUNT(cnt);
	bfin_write_MDMA_S0_X_MODIFY(smod);
	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	/* CONFIG writes last: DMAEN actually starts the engines. */
	bfin_write_MDMA_S0_CONFIG(DMAEN | conf);
	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf);

	spin_unlock_irqrestore(&mdma_lock, flags);

	SSYNC();

	/* Poll for completion with IRQs enabled.  If S0_CONFIG went to zero,
	 * presumably another context already reaped this transfer, so bail
	 * without touching the (possibly reprogrammed) registers.
	 * NOTE(review): exact reaper is outside this file - confirm.
	 */
	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
		if (bfin_read_MDMA_S0_CONFIG())
			continue;
		else
			return;

	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
}
23ee968d | 302 | |
/**
 * _dma_memcpy - translate C memcpy settings into MDMA settings
 *
 * Handle all the high level steps before we touch the MDMA registers. So
 * handle caching, tweaking of sizes, and formatting of addresses.
 */
static void *_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	u32 conf, shift;
	s16 mod;
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;

	if (size == 0)
		return NULL;

	/* Push dirty source lines to memory so the DMA engine reads
	 * current data ...
	 */
	if (bfin_addr_dcachable(src))
		blackfin_dcache_flush_range(src, src + size);

	/* ... and drop destination lines so later CPU loads refetch. */
	if (bfin_addr_dcachable(dst))
		blackfin_dcache_invalidate_range(dst, dst + size);

	/* Use the widest element both addresses and the length allow. */
	if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) {
		conf = WDSIZE_32;
		shift = 2;
	} else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) {
		conf = WDSIZE_16;
		shift = 1;
	} else {
		conf = WDSIZE_8;
		shift = 0;
	}

	/* If the two memory regions have a chance of overlapping, make
	 * sure the memcpy still works as expected. Do this by having the
	 * copy run backwards instead.
	 */
	mod = 1 << shift;
	if (src < dst) {
		mod *= -1;
		dst += size + mod;	/* address of the LAST element */
		src += size + mod;
	}
	size >>= shift;			/* bytes -> element count */

	/* More than 64k elements requires 2D mode; see __dma_memcpy(). */
	if (size > 0x10000)
		conf |= DMA2D;

	__dma_memcpy(dst, mod, src, mod, size, conf);

	return pdst;
}
23ee968d | 355 | |
dd3dd384 MF |
356 | /** |
357 | * dma_memcpy - DMA memcpy under mutex lock | |
358 | * | |
359 | * Do not check arguments before starting the DMA memcpy. Break the transfer | |
360 | * up into two pieces. The first transfer is in multiples of 64k and the | |
361 | * second transfer is the piece smaller than 64k. | |
362 | */ | |
363 | void *dma_memcpy(void *dst, const void *src, size_t size) | |
23ee968d | 364 | { |
dd3dd384 MF |
365 | size_t bulk, rest; |
366 | bulk = size & ~0xffff; | |
367 | rest = size - bulk; | |
368 | if (bulk) | |
369 | _dma_memcpy(dst, src, bulk); | |
370 | _dma_memcpy(dst + bulk, src + bulk, rest); | |
371 | return dst; | |
23ee968d | 372 | } |
dd3dd384 | 373 | EXPORT_SYMBOL(dma_memcpy); |
23ee968d | 374 | |
dd3dd384 MF |
375 | /** |
376 | * safe_dma_memcpy - DMA memcpy w/argument checking | |
377 | * | |
378 | * Verify arguments are safe before heading to dma_memcpy(). | |
379 | */ | |
380 | void *safe_dma_memcpy(void *dst, const void *src, size_t size) | |
23ee968d | 381 | { |
dd3dd384 MF |
382 | if (!access_ok(VERIFY_WRITE, dst, size)) |
383 | return NULL; | |
384 | if (!access_ok(VERIFY_READ, src, size)) | |
385 | return NULL; | |
386 | return dma_memcpy(dst, src, size); | |
23ee968d | 387 | } |
dd3dd384 | 388 | EXPORT_SYMBOL(safe_dma_memcpy); |
23ee968d | 389 | |
/* Stream @len elements of @size bytes from @buf out to fixed address @addr
 * (destination modify 0): flush the buffer first so the engine reads
 * current data.
 */
static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len,
		     u16 size, u16 dma_size)
{
	blackfin_dcache_flush_range(buf, buf + len * size);
	__dma_memcpy(addr, 0, buf, size, len, dma_size);
}
23ee968d | 396 | |
/* Stream @len elements of @size bytes from fixed address @addr into @buf
 * (source modify 0): invalidate the buffer first so the CPU refetches the
 * DMA-written data.
 */
static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len,
		    u16 size, u16 dma_size)
{
	blackfin_dcache_invalidate_range(buf, buf + len * size);
	__dma_memcpy(buf, size, addr, 0, len, dma_size);
}
dd3dd384 MF |
403 | |
/* Generate the exported dma_outsb/dma_insb/... helpers on top of
 * _dma_out/_dma_in.  "bwl" is byte/word/long, isize the element size in
 * bytes, dmasize the matching WDSIZE_* suffix, cnst "const" for outputs.
 */
#define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \
void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \
{ \
	_dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \
} \
EXPORT_SYMBOL(dma_##io##s##bwl)
MAKE_DMA_IO(out, b, 1,  8, const);
MAKE_DMA_IO(in,  b, 1,  8, );
MAKE_DMA_IO(out, w, 2, 16, const);
MAKE_DMA_IO(in,  w, 2, 16, );
MAKE_DMA_IO(out, l, 4, 32, const);
MAKE_DMA_IO(in,  l, 4, 32, );