Commit | Line | Data |
---|---|---|
3bfb1d20 HS |
1 | /* |
2 | * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on | |
3 | * AVR32 systems.) | |
4 | * | |
5 | * Copyright (C) 2007-2008 Atmel Corporation | |
aecb7b64 | 6 | * Copyright (C) 2010-2011 ST Microelectronics |
3bfb1d20 HS |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
327e6970 | 12 | #include <linux/bitops.h> |
3bfb1d20 HS |
13 | #include <linux/clk.h> |
14 | #include <linux/delay.h> | |
15 | #include <linux/dmaengine.h> | |
16 | #include <linux/dma-mapping.h> | |
17 | #include <linux/init.h> | |
18 | #include <linux/interrupt.h> | |
19 | #include <linux/io.h> | |
d3f797d9 | 20 | #include <linux/of.h> |
3bfb1d20 HS |
21 | #include <linux/mm.h> |
22 | #include <linux/module.h> | |
23 | #include <linux/platform_device.h> | |
24 | #include <linux/slab.h> | |
25 | ||
26 | #include "dw_dmac_regs.h" | |
d2ebfb33 | 27 | #include "dmaengine.h" |
3bfb1d20 HS |
28 | |
29 | /* | |
30 | * This supports the Synopsys "DesignWare AHB Central DMA Controller" |
31 | * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all |
32 | * of which use ARM any more). See the "Databook" from Synopsys for | |
33 | * information beyond what licensees probably provide. | |
34 | * | |
35 | * The driver has currently been tested only with the Atmel AT32AP7000, | |
36 | * which does not support descriptor writeback. | |
37 | */ | |
38 | ||
327e6970 VK |
39 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ |
40 | struct dw_dma_slave *__slave = (_chan->private); \ | |
41 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ | |
42 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ | |
43 | int _dms = __slave ? __slave->dst_master : 0; \ | |
44 | int _sms = __slave ? __slave->src_master : 1; \ | |
45 | u8 _smsize = __slave ? _sconfig->src_maxburst : \ | |
46 | DW_DMA_MSIZE_16; \ | |
47 | u8 _dmsize = __slave ? _sconfig->dst_maxburst : \ | |
48 | DW_DMA_MSIZE_16; \ | |
f301c062 | 49 | \ |
327e6970 VK |
50 | (DWC_CTLL_DST_MSIZE(_dmsize) \ |
51 | | DWC_CTLL_SRC_MSIZE(_smsize) \ | |
f301c062 JI |
52 | | DWC_CTLL_LLP_D_EN \ |
53 | | DWC_CTLL_LLP_S_EN \ | |
327e6970 VK |
54 | | DWC_CTLL_DMS(_dms) \ |
55 | | DWC_CTLL_SMS(_sms)); \ | |
f301c062 | 56 | }) |
3bfb1d20 HS |
57 | |
58 | /* | |
59 | * The maximum block transfer size is configuration-dependent and usually a funny size like 4095. |
3bfb1d20 HS |
60 | * |
61 | * Note that this is a transfer count, i.e. if we transfer 32-bit | |
418e7407 | 62 | * words, we can do 16380 bytes per descriptor. |
3bfb1d20 HS |
63 | * |
64 | * This parameter is also system-specific. | |
65 | */ | |
418e7407 | 66 | #define DWC_MAX_COUNT 4095U |
3bfb1d20 HS |
67 | |
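
As a quick sanity check on the note above (an illustrative user-space snippet, not part of the driver): bytes per descriptor equal the transfer count shifted left by the log2 of the transfer width, so 32-bit words give 4095 << 2 = 16380 bytes.

```c
#include <stdio.h>

#define DWC_MAX_COUNT	4095U

int main(void)
{
	/* bytes per descriptor = transfer count << log2(transfer width in bytes) */
	for (unsigned int width = 0; width <= 3; width++)
		printf("%u-byte transfers: %u bytes per descriptor\n",
		       1U << width, DWC_MAX_COUNT << width);
	/* 4-byte (32-bit) transfers -> 16380 bytes, matching the comment above */
	return 0;
}
```
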
68 | /* | |
69 | * Number of descriptors to allocate for each channel. This should be | |
70 | * made configurable somehow; preferably, the clients (at least the | |
71 | * ones using slave transfers) should be able to give us a hint. | |
72 | */ | |
73 | #define NR_DESCS_PER_CHANNEL 64 | |
74 | ||
75 | /*----------------------------------------------------------------------*/ | |
76 | ||
77 | /* | |
78 | * Because we're not relying on writeback from the controller (it may not | |
79 | * even be configured into the core!) we don't need to use dma_pool. These | |
80 | * descriptors -- and associated data -- are cacheable. We do need to make | |
81 | * sure their dcache entries are written back before handing them off to | |
82 | * the controller, though. | |
83 | */ | |
84 | ||
41d5e59c DW |
85 | static struct device *chan2dev(struct dma_chan *chan) |
86 | { | |
87 | return &chan->dev->device; | |
88 | } | |
89 | static struct device *chan2parent(struct dma_chan *chan) | |
90 | { | |
91 | return chan->dev->device.parent; | |
92 | } | |
93 | ||
3bfb1d20 HS |
94 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) |
95 | { | |
96 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); | |
97 | } | |
98 | ||
3bfb1d20 HS |
99 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) |
100 | { | |
101 | struct dw_desc *desc, *_desc; | |
102 | struct dw_desc *ret = NULL; | |
103 | unsigned int i = 0; | |
69cea5a0 | 104 | unsigned long flags; |
3bfb1d20 | 105 | |
69cea5a0 | 106 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 | 107 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { |
2ab37276 | 108 | i++; |
3bfb1d20 HS |
109 | if (async_tx_test_ack(&desc->txd)) { |
110 | list_del(&desc->desc_node); | |
111 | ret = desc; | |
112 | break; | |
113 | } | |
41d5e59c | 114 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); |
3bfb1d20 | 115 | } |
69cea5a0 | 116 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 117 | |
41d5e59c | 118 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); |
3bfb1d20 HS |
119 | |
120 | return ret; | |
121 | } | |
122 | ||
123 | static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) | |
124 | { | |
125 | struct dw_desc *child; | |
126 | ||
e0bd0f8c | 127 | list_for_each_entry(child, &desc->tx_list, desc_node) |
41d5e59c | 128 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
3bfb1d20 HS |
129 | child->txd.phys, sizeof(child->lli), |
130 | DMA_TO_DEVICE); | |
41d5e59c | 131 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
3bfb1d20 HS |
132 | desc->txd.phys, sizeof(desc->lli), |
133 | DMA_TO_DEVICE); | |
134 | } | |
135 | ||
136 | /* | |
137 | * Move a descriptor, including any children, to the free list. | |
138 | * `desc' must not be on any lists. | |
139 | */ | |
140 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |
141 | { | |
69cea5a0 VK |
142 | unsigned long flags; |
143 | ||
3bfb1d20 HS |
144 | if (desc) { |
145 | struct dw_desc *child; | |
146 | ||
147 | dwc_sync_desc_for_cpu(dwc, desc); | |
148 | ||
69cea5a0 | 149 | spin_lock_irqsave(&dwc->lock, flags); |
e0bd0f8c | 150 | list_for_each_entry(child, &desc->tx_list, desc_node) |
41d5e59c | 151 | dev_vdbg(chan2dev(&dwc->chan), |
3bfb1d20 HS |
152 | "moving child desc %p to freelist\n", |
153 | child); | |
e0bd0f8c | 154 | list_splice_init(&desc->tx_list, &dwc->free_list); |
41d5e59c | 155 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); |
3bfb1d20 | 156 | list_add(&desc->desc_node, &dwc->free_list); |
69cea5a0 | 157 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
158 | } |
159 | } | |
160 | ||
61e183f8 VK |
161 | static void dwc_initialize(struct dw_dma_chan *dwc) |
162 | { | |
163 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
164 | struct dw_dma_slave *dws = dwc->chan.private; | |
165 | u32 cfghi = DWC_CFGH_FIFO_MODE; | |
166 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); | |
167 | ||
168 | if (dwc->initialized == true) | |
169 | return; | |
170 | ||
171 | if (dws) { | |
172 | /* | |
173 | * We need controller-specific data to set up slave | |
174 | * transfers. | |
175 | */ | |
176 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); | |
177 | ||
178 | cfghi = dws->cfg_hi; | |
179 | cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; | |
180 | } | |
181 | ||
182 | channel_writel(dwc, CFG_LO, cfglo); | |
183 | channel_writel(dwc, CFG_HI, cfghi); | |
184 | ||
185 | /* Enable interrupts */ | |
186 | channel_set_bit(dw, MASK.XFER, dwc->mask); | |
61e183f8 VK |
187 | channel_set_bit(dw, MASK.ERROR, dwc->mask); |
188 | ||
189 | dwc->initialized = true; | |
190 | } | |
191 | ||
3bfb1d20 HS |
192 | /*----------------------------------------------------------------------*/ |
193 | ||
4c2d56c5 AS |
194 | static inline unsigned int dwc_fast_fls(unsigned long long v) |
195 | { | |
196 | /* | |
197 | * Returns the largest transfer width (log2 bytes, 0..3) that the alignment |
198 | * of v allows. We can be more clever here, but this covers the common case. |
199 | */ | |
200 | if (!(v & 7)) | |
201 | return 3; | |
202 | else if (!(v & 3)) | |
203 | return 2; | |
204 | else if (!(v & 1)) | |
205 | return 1; | |
206 | return 0; | |
207 | } | |
208 | ||
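
As an aside, a small user-space sketch (not driver code) of what the helper above computes for dwc_prep_dma_memcpy() and dwc_prep_slave_sg(): the OR of the addresses and the length picks the widest transfer size they are all aligned to.

```c
#include <stdio.h>

/* Same logic as dwc_fast_fls(): log2 of the widest aligned access, capped at 8 bytes */
static unsigned int fast_fls(unsigned long long v)
{
	if (!(v & 7))
		return 3;	/* 64-bit transfers */
	else if (!(v & 3))
		return 2;	/* 32-bit transfers */
	else if (!(v & 1))
		return 1;	/* 16-bit transfers */
	return 0;		/* byte transfers */
}

int main(void)
{
	/* 8-byte-aligned buffers with an aligned length allow 64-bit transfers... */
	printf("%u\n", fast_fls(0x1000 | 0x2000 | 4096));	/* prints 3 */
	/* ...but an odd length forces byte-wide transfers */
	printf("%u\n", fast_fls(0x1000 | 0x2000 | 4095));	/* prints 0 */
	return 0;
}
```
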
1d455437 AS |
209 | static void dwc_dump_chan_regs(struct dw_dma_chan *dwc) |
210 | { | |
211 | dev_err(chan2dev(&dwc->chan), | |
212 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | |
213 | channel_readl(dwc, SAR), | |
214 | channel_readl(dwc, DAR), | |
215 | channel_readl(dwc, LLP), | |
216 | channel_readl(dwc, CTL_HI), | |
217 | channel_readl(dwc, CTL_LO)); | |
218 | } | |
219 | ||
3f936207 AS |
220 | |
221 | static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) | |
222 | { | |
223 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
224 | while (dma_readl(dw, CH_EN) & dwc->mask) | |
225 | cpu_relax(); | |
226 | } | |
227 | ||
1d455437 AS |
228 | /*----------------------------------------------------------------------*/ |
229 | ||
3bfb1d20 HS |
230 | /* Called with dwc->lock held and bh disabled */ |
231 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |
232 | { | |
233 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
234 | ||
235 | /* ASSERT: channel is idle */ | |
236 | if (dma_readl(dw, CH_EN) & dwc->mask) { | |
41d5e59c | 237 | dev_err(chan2dev(&dwc->chan), |
3bfb1d20 | 238 | "BUG: Attempted to start non-idle channel\n"); |
1d455437 | 239 | dwc_dump_chan_regs(dwc); |
3bfb1d20 HS |
240 | |
241 | /* The tasklet will hopefully advance the queue... */ | |
242 | return; | |
243 | } | |
244 | ||
61e183f8 VK |
245 | dwc_initialize(dwc); |
246 | ||
3bfb1d20 HS |
247 | channel_writel(dwc, LLP, first->txd.phys); |
248 | channel_writel(dwc, CTL_LO, | |
249 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | |
250 | channel_writel(dwc, CTL_HI, 0); | |
251 | channel_set_bit(dw, CH_EN, dwc->mask); | |
252 | } | |
253 | ||
254 | /*----------------------------------------------------------------------*/ | |
255 | ||
256 | static void | |
5fedefb8 VK |
257 | dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, |
258 | bool callback_required) | |
3bfb1d20 | 259 | { |
5fedefb8 VK |
260 | dma_async_tx_callback callback = NULL; |
261 | void *param = NULL; | |
3bfb1d20 | 262 | struct dma_async_tx_descriptor *txd = &desc->txd; |
e518076e | 263 | struct dw_desc *child; |
69cea5a0 | 264 | unsigned long flags; |
3bfb1d20 | 265 | |
41d5e59c | 266 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
3bfb1d20 | 267 | |
69cea5a0 | 268 | spin_lock_irqsave(&dwc->lock, flags); |
f7fbce07 | 269 | dma_cookie_complete(txd); |
5fedefb8 VK |
270 | if (callback_required) { |
271 | callback = txd->callback; | |
272 | param = txd->callback_param; | |
273 | } | |
3bfb1d20 HS |
274 | |
275 | dwc_sync_desc_for_cpu(dwc, desc); | |
e518076e VK |
276 | |
277 | /* async_tx_ack */ | |
278 | list_for_each_entry(child, &desc->tx_list, desc_node) | |
279 | async_tx_ack(&child->txd); | |
280 | async_tx_ack(&desc->txd); | |
281 | ||
e0bd0f8c | 282 | list_splice_init(&desc->tx_list, &dwc->free_list); |
3bfb1d20 HS |
283 | list_move(&desc->desc_node, &dwc->free_list); |
284 | ||
657a77fa AN |
285 | if (!dwc->chan.private) { |
286 | struct device *parent = chan2parent(&dwc->chan); | |
287 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | |
288 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | |
289 | dma_unmap_single(parent, desc->lli.dar, | |
290 | desc->len, DMA_FROM_DEVICE); | |
291 | else | |
292 | dma_unmap_page(parent, desc->lli.dar, | |
293 | desc->len, DMA_FROM_DEVICE); | |
294 | } | |
295 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | |
296 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | |
297 | dma_unmap_single(parent, desc->lli.sar, | |
298 | desc->len, DMA_TO_DEVICE); | |
299 | else | |
300 | dma_unmap_page(parent, desc->lli.sar, | |
301 | desc->len, DMA_TO_DEVICE); | |
302 | } | |
303 | } | |
3bfb1d20 | 304 | |
69cea5a0 VK |
305 | spin_unlock_irqrestore(&dwc->lock, flags); |
306 | ||
5fedefb8 | 307 | if (callback_required && callback) |
3bfb1d20 HS |
308 | callback(param); |
309 | } | |
310 | ||
311 | static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |
312 | { | |
313 | struct dw_desc *desc, *_desc; | |
314 | LIST_HEAD(list); | |
69cea5a0 | 315 | unsigned long flags; |
3bfb1d20 | 316 | |
69cea5a0 | 317 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 | 318 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
41d5e59c | 319 | dev_err(chan2dev(&dwc->chan), |
3bfb1d20 HS |
320 | "BUG: XFER bit set, but channel not idle!\n"); |
321 | ||
322 | /* Try to continue after resetting the channel... */ | |
3f936207 | 323 | dwc_chan_disable(dw, dwc); |
3bfb1d20 HS |
324 | } |
325 | ||
326 | /* | |
327 | * Submit queued descriptors ASAP, i.e. before we go through | |
328 | * the completed ones. | |
329 | */ | |
3bfb1d20 | 330 | list_splice_init(&dwc->active_list, &list); |
f336e42f VK |
331 | if (!list_empty(&dwc->queue)) { |
332 | list_move(dwc->queue.next, &dwc->active_list); | |
333 | dwc_dostart(dwc, dwc_first_active(dwc)); | |
334 | } | |
3bfb1d20 | 335 | |
69cea5a0 VK |
336 | spin_unlock_irqrestore(&dwc->lock, flags); |
337 | ||
3bfb1d20 | 338 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
5fedefb8 | 339 | dwc_descriptor_complete(dwc, desc, true); |
3bfb1d20 HS |
340 | } |
341 | ||
342 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |
343 | { | |
344 | dma_addr_t llp; | |
345 | struct dw_desc *desc, *_desc; | |
346 | struct dw_desc *child; | |
347 | u32 status_xfer; | |
69cea5a0 | 348 | unsigned long flags; |
3bfb1d20 | 349 | |
69cea5a0 | 350 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
351 | llp = channel_readl(dwc, LLP); |
352 | status_xfer = dma_readl(dw, RAW.XFER); | |
353 | ||
354 | if (status_xfer & dwc->mask) { | |
355 | /* Everything we've submitted is done */ | |
356 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
69cea5a0 VK |
357 | spin_unlock_irqrestore(&dwc->lock, flags); |
358 | ||
3bfb1d20 HS |
359 | dwc_complete_all(dw, dwc); |
360 | return; | |
361 | } | |
362 | ||
69cea5a0 VK |
363 | if (list_empty(&dwc->active_list)) { |
364 | spin_unlock_irqrestore(&dwc->lock, flags); | |
087809fc | 365 | return; |
69cea5a0 | 366 | } |
087809fc | 367 | |
2e4c364e | 368 | dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, |
2f45d613 | 369 | (unsigned long long)llp); |
3bfb1d20 HS |
370 | |
371 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | |
84adccfb | 372 | /* check first descriptor's addr */
69cea5a0 VK |
373 | if (desc->txd.phys == llp) { |
374 | spin_unlock_irqrestore(&dwc->lock, flags); | |
84adccfb | 375 | return; |
69cea5a0 | 376 | } |
84adccfb VK |
377 | |
378 | /* check first descriptor's llp */ |
69cea5a0 | 379 | if (desc->lli.llp == llp) { |
3bfb1d20 | 380 | /* This one is currently in progress */ |
69cea5a0 | 381 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 382 | return; |
69cea5a0 | 383 | } |
3bfb1d20 | 384 | |
e0bd0f8c | 385 | list_for_each_entry(child, &desc->tx_list, desc_node) |
69cea5a0 | 386 | if (child->lli.llp == llp) { |
3bfb1d20 | 387 | /* Currently in progress */ |
69cea5a0 | 388 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 389 | return; |
69cea5a0 | 390 | } |
3bfb1d20 HS |
391 | |
392 | /* | |
393 | * No descriptors so far seem to be in progress, i.e. | |
394 | * this one must be done. | |
395 | */ | |
69cea5a0 | 396 | spin_unlock_irqrestore(&dwc->lock, flags); |
5fedefb8 | 397 | dwc_descriptor_complete(dwc, desc, true); |
69cea5a0 | 398 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
399 | } |
400 | ||
41d5e59c | 401 | dev_err(chan2dev(&dwc->chan), |
3bfb1d20 HS |
402 | "BUG: All descriptors done, but channel not idle!\n"); |
403 | ||
404 | /* Try to continue after resetting the channel... */ | |
3f936207 | 405 | dwc_chan_disable(dw, dwc); |
3bfb1d20 HS |
406 | |
407 | if (!list_empty(&dwc->queue)) { | |
f336e42f VK |
408 | list_move(dwc->queue.next, &dwc->active_list); |
409 | dwc_dostart(dwc, dwc_first_active(dwc)); | |
3bfb1d20 | 410 | } |
69cea5a0 | 411 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
412 | } |
413 | ||
93aad1bc | 414 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) |
3bfb1d20 | 415 | { |
41d5e59c | 416 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
3bfb1d20 | 417 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
f8609c2b | 418 | lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); |
3bfb1d20 HS |
419 | } |
420 | ||
421 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |
422 | { | |
423 | struct dw_desc *bad_desc; | |
424 | struct dw_desc *child; | |
69cea5a0 | 425 | unsigned long flags; |
3bfb1d20 HS |
426 | |
427 | dwc_scan_descriptors(dw, dwc); | |
428 | ||
69cea5a0 VK |
429 | spin_lock_irqsave(&dwc->lock, flags); |
430 | ||
3bfb1d20 HS |
431 | /* |
432 | * The descriptor currently at the head of the active list is | |
433 | * borked. Since we don't have any way to report errors, we'll | |
434 | * just have to scream loudly and try to carry on. | |
435 | */ | |
436 | bad_desc = dwc_first_active(dwc); | |
437 | list_del_init(&bad_desc->desc_node); | |
f336e42f | 438 | list_move(dwc->queue.next, dwc->active_list.prev); |
3bfb1d20 HS |
439 | |
440 | /* Clear the error flag and try to restart the controller */ | |
441 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | |
442 | if (!list_empty(&dwc->active_list)) | |
443 | dwc_dostart(dwc, dwc_first_active(dwc)); | |
444 | ||
445 | /* | |
446 | * KERN_CRIT may seem harsh, but since this only happens |
447 | * when someone submits a bad physical address in a | |
448 | * descriptor, we should consider ourselves lucky that the | |
449 | * controller flagged an error instead of scribbling over | |
450 | * random memory locations. | |
451 | */ | |
41d5e59c | 452 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
3bfb1d20 | 453 | "Bad descriptor submitted for DMA!\n"); |
41d5e59c | 454 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
3bfb1d20 HS |
455 | " cookie: %d\n", bad_desc->txd.cookie); |
456 | dwc_dump_lli(dwc, &bad_desc->lli); | |
e0bd0f8c | 457 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
3bfb1d20 HS |
458 | dwc_dump_lli(dwc, &child->lli); |
459 | ||
69cea5a0 VK |
460 | spin_unlock_irqrestore(&dwc->lock, flags); |
461 | ||
3bfb1d20 | 462 | /* Pretend the descriptor completed successfully */ |
5fedefb8 | 463 | dwc_descriptor_complete(dwc, bad_desc, true); |
3bfb1d20 HS |
464 | } |
465 | ||
d9de4519 HCE |
466 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
467 | ||
468 | inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) | |
469 | { | |
470 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
471 | return channel_readl(dwc, SAR); | |
472 | } | |
473 | EXPORT_SYMBOL(dw_dma_get_src_addr); | |
474 | ||
475 | inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) | |
476 | { | |
477 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
478 | return channel_readl(dwc, DAR); | |
479 | } | |
480 | EXPORT_SYMBOL(dw_dma_get_dst_addr); | |
481 | ||
482 | /* called with dwc->lock held and all DMAC interrupts disabled */ | |
483 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |
ff7b05f2 | 484 | u32 status_err, u32 status_xfer) |
d9de4519 | 485 | { |
69cea5a0 VK |
486 | unsigned long flags; |
487 | ||
ff7b05f2 | 488 | if (dwc->mask) { |
d9de4519 HCE |
489 | void (*callback)(void *param); |
490 | void *callback_param; | |
491 | ||
492 | dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", | |
493 | channel_readl(dwc, LLP)); | |
d9de4519 HCE |
494 | |
495 | callback = dwc->cdesc->period_callback; | |
496 | callback_param = dwc->cdesc->period_callback_param; | |
69cea5a0 VK |
497 | |
498 | if (callback) | |
d9de4519 | 499 | callback(callback_param); |
d9de4519 HCE |
500 | } |
501 | ||
502 | /* | |
503 | * Error and transfer complete are highly unlikely, and will most | |
504 | * likely be due to a configuration error by the user. | |
505 | */ | |
506 | if (unlikely(status_err & dwc->mask) || | |
507 | unlikely(status_xfer & dwc->mask)) { | |
508 | int i; | |
509 | ||
510 | dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " | |
511 | "interrupt, stopping DMA transfer\n", | |
512 | status_xfer ? "xfer" : "error"); | |
69cea5a0 VK |
513 | |
514 | spin_lock_irqsave(&dwc->lock, flags); | |
515 | ||
1d455437 | 516 | dwc_dump_chan_regs(dwc); |
d9de4519 | 517 | |
3f936207 | 518 | dwc_chan_disable(dw, dwc); |
d9de4519 HCE |
519 | |
520 | /* make sure DMA does not restart by loading a new list */ | |
521 | channel_writel(dwc, LLP, 0); | |
522 | channel_writel(dwc, CTL_LO, 0); | |
523 | channel_writel(dwc, CTL_HI, 0); | |
524 | ||
d9de4519 HCE |
525 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
526 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
527 | ||
528 | for (i = 0; i < dwc->cdesc->periods; i++) | |
529 | dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); | |
69cea5a0 VK |
530 | |
531 | spin_unlock_irqrestore(&dwc->lock, flags); | |
d9de4519 HCE |
532 | } |
533 | } | |
534 | ||
535 | /* ------------------------------------------------------------------------- */ | |
536 | ||
3bfb1d20 HS |
537 | static void dw_dma_tasklet(unsigned long data) |
538 | { | |
539 | struct dw_dma *dw = (struct dw_dma *)data; | |
540 | struct dw_dma_chan *dwc; | |
3bfb1d20 HS |
541 | u32 status_xfer; |
542 | u32 status_err; | |
543 | int i; | |
544 | ||
7fe7b2f4 | 545 | status_xfer = dma_readl(dw, RAW.XFER); |
3bfb1d20 HS |
546 | status_err = dma_readl(dw, RAW.ERROR); |
547 | ||
2e4c364e | 548 | dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); |
3bfb1d20 HS |
549 | |
550 | for (i = 0; i < dw->dma.chancnt; i++) { | |
551 | dwc = &dw->chan[i]; | |
d9de4519 | 552 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) |
ff7b05f2 | 553 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); |
d9de4519 | 554 | else if (status_err & (1 << i)) |
3bfb1d20 | 555 | dwc_handle_error(dw, dwc); |
ff7b05f2 | 556 | else if (status_xfer & (1 << i)) |
3bfb1d20 | 557 | dwc_scan_descriptors(dw, dwc); |
3bfb1d20 HS |
558 | } |
559 | ||
560 | /* | |
ff7b05f2 | 561 | * Re-enable interrupts. |
3bfb1d20 HS |
562 | */ |
563 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); | |
3bfb1d20 HS |
564 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); |
565 | } | |
566 | ||
567 | static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |
568 | { | |
569 | struct dw_dma *dw = dev_id; | |
570 | u32 status; | |
571 | ||
2e4c364e | 572 | dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, |
3bfb1d20 HS |
573 | dma_readl(dw, STATUS_INT)); |
574 | ||
575 | /* | |
576 | * Just disable the interrupts. We'll turn them back on in the | |
577 | * softirq handler. | |
578 | */ | |
579 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | |
3bfb1d20 HS |
580 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
581 | ||
582 | status = dma_readl(dw, STATUS_INT); | |
583 | if (status) { | |
584 | dev_err(dw->dma.dev, | |
585 | "BUG: Unexpected interrupts pending: 0x%x\n", | |
586 | status); | |
587 | ||
588 | /* Try to recover */ | |
589 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); | |
3bfb1d20 HS |
590 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); |
591 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); | |
592 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); | |
593 | } | |
594 | ||
595 | tasklet_schedule(&dw->tasklet); | |
596 | ||
597 | return IRQ_HANDLED; | |
598 | } | |
599 | ||
600 | /*----------------------------------------------------------------------*/ | |
601 | ||
602 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |
603 | { | |
604 | struct dw_desc *desc = txd_to_dw_desc(tx); | |
605 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); | |
606 | dma_cookie_t cookie; | |
69cea5a0 | 607 | unsigned long flags; |
3bfb1d20 | 608 | |
69cea5a0 | 609 | spin_lock_irqsave(&dwc->lock, flags); |
884485e1 | 610 | cookie = dma_cookie_assign(tx); |
3bfb1d20 HS |
611 | |
612 | /* | |
613 | * REVISIT: We should attempt to chain as many descriptors as | |
614 | * possible, perhaps even appending to those already submitted | |
615 | * for DMA. But this is hard to do in a race-free manner. | |
616 | */ | |
617 | if (list_empty(&dwc->active_list)) { | |
2e4c364e | 618 | dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, |
3bfb1d20 | 619 | desc->txd.cookie); |
3bfb1d20 | 620 | list_add_tail(&desc->desc_node, &dwc->active_list); |
f336e42f | 621 | dwc_dostart(dwc, dwc_first_active(dwc)); |
3bfb1d20 | 622 | } else { |
2e4c364e | 623 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, |
3bfb1d20 HS |
624 | desc->txd.cookie); |
625 | ||
626 | list_add_tail(&desc->desc_node, &dwc->queue); | |
627 | } | |
628 | ||
69cea5a0 | 629 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
630 | |
631 | return cookie; | |
632 | } | |
633 | ||
634 | static struct dma_async_tx_descriptor * | |
635 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |
636 | size_t len, unsigned long flags) | |
637 | { | |
638 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
639 | struct dw_desc *desc; | |
640 | struct dw_desc *first; | |
641 | struct dw_desc *prev; | |
642 | size_t xfer_count; | |
643 | size_t offset; | |
644 | unsigned int src_width; | |
645 | unsigned int dst_width; | |
646 | u32 ctllo; | |
647 | ||
2f45d613 | 648 | dev_vdbg(chan2dev(chan), |
2e4c364e | 649 | "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, |
2f45d613 AS |
650 | (unsigned long long)dest, (unsigned long long)src, |
651 | len, flags); | |
3bfb1d20 HS |
652 | |
653 | if (unlikely(!len)) { | |
2e4c364e | 654 | dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); |
3bfb1d20 HS |
655 | return NULL; |
656 | } | |
657 | ||
4c2d56c5 | 658 | src_width = dst_width = dwc_fast_fls(src | dest | len); |
3bfb1d20 | 659 | |
327e6970 | 660 | ctllo = DWC_DEFAULT_CTLLO(chan) |
3bfb1d20 HS |
661 | | DWC_CTLL_DST_WIDTH(dst_width) |
662 | | DWC_CTLL_SRC_WIDTH(src_width) | |
663 | | DWC_CTLL_DST_INC | |
664 | | DWC_CTLL_SRC_INC | |
665 | | DWC_CTLL_FC_M2M; | |
666 | prev = first = NULL; | |
667 | ||
668 | for (offset = 0; offset < len; offset += xfer_count << src_width) { | |
669 | xfer_count = min_t(size_t, (len - offset) >> src_width, | |
670 | DWC_MAX_COUNT); | |
671 | ||
672 | desc = dwc_desc_get(dwc); | |
673 | if (!desc) | |
674 | goto err_desc_get; | |
675 | ||
676 | desc->lli.sar = src + offset; | |
677 | desc->lli.dar = dest + offset; | |
678 | desc->lli.ctllo = ctllo; | |
679 | desc->lli.ctlhi = xfer_count; | |
680 | ||
681 | if (!first) { | |
682 | first = desc; | |
683 | } else { | |
684 | prev->lli.llp = desc->txd.phys; | |
41d5e59c | 685 | dma_sync_single_for_device(chan2parent(chan), |
3bfb1d20 HS |
686 | prev->txd.phys, sizeof(prev->lli), |
687 | DMA_TO_DEVICE); | |
688 | list_add_tail(&desc->desc_node, | |
e0bd0f8c | 689 | &first->tx_list); |
3bfb1d20 HS |
690 | } |
691 | prev = desc; | |
692 | } | |
693 | ||
694 | ||
695 | if (flags & DMA_PREP_INTERRUPT) | |
696 | /* Trigger interrupt after last block */ | |
697 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | |
698 | ||
699 | prev->lli.llp = 0; | |
41d5e59c | 700 | dma_sync_single_for_device(chan2parent(chan), |
3bfb1d20 HS |
701 | prev->txd.phys, sizeof(prev->lli), |
702 | DMA_TO_DEVICE); | |
703 | ||
704 | first->txd.flags = flags; | |
705 | first->len = len; | |
706 | ||
707 | return &first->txd; | |
708 | ||
709 | err_desc_get: | |
710 | dwc_desc_put(dwc, first); | |
711 | return NULL; | |
712 | } | |
713 | ||
714 | static struct dma_async_tx_descriptor * | |
715 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |
db8196df | 716 | unsigned int sg_len, enum dma_transfer_direction direction, |
185ecb5f | 717 | unsigned long flags, void *context) |
3bfb1d20 HS |
718 | { |
719 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
287d8592 | 720 | struct dw_dma_slave *dws = chan->private; |
327e6970 | 721 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
3bfb1d20 HS |
722 | struct dw_desc *prev; |
723 | struct dw_desc *first; | |
724 | u32 ctllo; | |
725 | dma_addr_t reg; | |
726 | unsigned int reg_width; | |
727 | unsigned int mem_width; | |
728 | unsigned int i; | |
729 | struct scatterlist *sg; | |
730 | size_t total_len = 0; | |
731 | ||
2e4c364e | 732 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
3bfb1d20 HS |
733 | |
734 | if (unlikely(!dws || !sg_len)) | |
735 | return NULL; | |
736 | ||
3bfb1d20 HS |
737 | prev = first = NULL; |
738 | ||
3bfb1d20 | 739 | switch (direction) { |
db8196df | 740 | case DMA_MEM_TO_DEV: |
327e6970 VK |
741 | reg_width = __fls(sconfig->dst_addr_width); |
742 | reg = sconfig->dst_addr; | |
743 | ctllo = (DWC_DEFAULT_CTLLO(chan) | |
3bfb1d20 HS |
744 | | DWC_CTLL_DST_WIDTH(reg_width) |
745 | | DWC_CTLL_DST_FIX | |
327e6970 VK |
746 | | DWC_CTLL_SRC_INC); |
747 | ||
748 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | |
749 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | |
750 | ||
3bfb1d20 HS |
751 | for_each_sg(sgl, sg, sg_len, i) { |
752 | struct dw_desc *desc; | |
69dc14b5 | 753 | u32 len, dlen, mem; |
3bfb1d20 | 754 | |
cbb796cc | 755 | mem = sg_dma_address(sg); |
69dc14b5 | 756 | len = sg_dma_len(sg); |
6bc711f6 | 757 | |
4c2d56c5 | 758 | mem_width = dwc_fast_fls(mem | len); |
3bfb1d20 | 759 | |
69dc14b5 | 760 | slave_sg_todev_fill_desc: |
3bfb1d20 HS |
761 | desc = dwc_desc_get(dwc); |
762 | if (!desc) { | |
41d5e59c | 763 | dev_err(chan2dev(chan), |
3bfb1d20 HS |
764 | "not enough descriptors available\n"); |
765 | goto err_desc_get; | |
766 | } | |
767 | ||
3bfb1d20 HS |
768 | desc->lli.sar = mem; |
769 | desc->lli.dar = reg; | |
770 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); | |
69dc14b5 VK |
771 | if ((len >> mem_width) > DWC_MAX_COUNT) { |
772 | dlen = DWC_MAX_COUNT << mem_width; | |
773 | mem += dlen; | |
774 | len -= dlen; | |
775 | } else { | |
776 | dlen = len; | |
777 | len = 0; | |
778 | } | |
779 | ||
780 | desc->lli.ctlhi = dlen >> mem_width; | |
3bfb1d20 HS |
781 | |
782 | if (!first) { | |
783 | first = desc; | |
784 | } else { | |
785 | prev->lli.llp = desc->txd.phys; | |
41d5e59c | 786 | dma_sync_single_for_device(chan2parent(chan), |
3bfb1d20 HS |
787 | prev->txd.phys, |
788 | sizeof(prev->lli), | |
789 | DMA_TO_DEVICE); | |
790 | list_add_tail(&desc->desc_node, | |
e0bd0f8c | 791 | &first->tx_list); |
3bfb1d20 HS |
792 | } |
793 | prev = desc; | |
69dc14b5 VK |
794 | total_len += dlen; |
795 | ||
796 | if (len) | |
797 | goto slave_sg_todev_fill_desc; | |
3bfb1d20 HS |
798 | } |
799 | break; | |
db8196df | 800 | case DMA_DEV_TO_MEM: |
327e6970 VK |
801 | reg_width = __fls(sconfig->src_addr_width); |
802 | reg = sconfig->src_addr; | |
803 | ctllo = (DWC_DEFAULT_CTLLO(chan) | |
3bfb1d20 HS |
804 | | DWC_CTLL_SRC_WIDTH(reg_width) |
805 | | DWC_CTLL_DST_INC | |
327e6970 VK |
806 | | DWC_CTLL_SRC_FIX); |
807 | ||
808 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | |
809 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | |
3bfb1d20 | 810 | |
3bfb1d20 HS |
811 | for_each_sg(sgl, sg, sg_len, i) { |
812 | struct dw_desc *desc; | |
69dc14b5 | 813 | u32 len, dlen, mem; |
3bfb1d20 | 814 | |
cbb796cc | 815 | mem = sg_dma_address(sg); |
3bfb1d20 | 816 | len = sg_dma_len(sg); |
6bc711f6 | 817 | |
4c2d56c5 | 818 | mem_width = dwc_fast_fls(mem | len); |
3bfb1d20 | 819 | |
69dc14b5 VK |
820 | slave_sg_fromdev_fill_desc: |
821 | desc = dwc_desc_get(dwc); | |
822 | if (!desc) { | |
823 | dev_err(chan2dev(chan), | |
824 | "not enough descriptors available\n"); | |
825 | goto err_desc_get; | |
826 | } | |
827 | ||
3bfb1d20 HS |
828 | desc->lli.sar = reg; |
829 | desc->lli.dar = mem; | |
830 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); | |
69dc14b5 VK |
831 | if ((len >> reg_width) > DWC_MAX_COUNT) { |
832 | dlen = DWC_MAX_COUNT << reg_width; | |
833 | mem += dlen; | |
834 | len -= dlen; | |
835 | } else { | |
836 | dlen = len; | |
837 | len = 0; | |
838 | } | |
839 | desc->lli.ctlhi = dlen >> reg_width; | |
3bfb1d20 HS |
840 | |
841 | if (!first) { | |
842 | first = desc; | |
843 | } else { | |
844 | prev->lli.llp = desc->txd.phys; | |
41d5e59c | 845 | dma_sync_single_for_device(chan2parent(chan), |
3bfb1d20 HS |
846 | prev->txd.phys, |
847 | sizeof(prev->lli), | |
848 | DMA_TO_DEVICE); | |
849 | list_add_tail(&desc->desc_node, | |
e0bd0f8c | 850 | &first->tx_list); |
3bfb1d20 HS |
851 | } |
852 | prev = desc; | |
69dc14b5 VK |
853 | total_len += dlen; |
854 | ||
855 | if (len) | |
856 | goto slave_sg_fromdev_fill_desc; | |
3bfb1d20 HS |
857 | } |
858 | break; | |
859 | default: | |
860 | return NULL; | |
861 | } | |
862 | ||
863 | if (flags & DMA_PREP_INTERRUPT) | |
864 | /* Trigger interrupt after last block */ | |
865 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | |
866 | ||
867 | prev->lli.llp = 0; | |
41d5e59c | 868 | dma_sync_single_for_device(chan2parent(chan), |
3bfb1d20 HS |
869 | prev->txd.phys, sizeof(prev->lli), |
870 | DMA_TO_DEVICE); | |
871 | ||
872 | first->len = total_len; | |
873 | ||
874 | return &first->txd; | |
875 | ||
876 | err_desc_get: | |
877 | dwc_desc_put(dwc, first); | |
878 | return NULL; | |
879 | } | |
880 | ||
327e6970 VK |
881 | /* |
882 | * Fix sconfig's burst size according to dw_dmac. We need to convert them as: | |
883 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | |
884 | * | |
885 | * NOTE: burst size 2 is not supported by controller. | |
886 | * | |
887 | * This can be done by finding the most significant bit set: fls(n) - 2. |
888 | */ | |
889 | static inline void convert_burst(u32 *maxburst) | |
890 | { | |
891 | if (*maxburst > 1) | |
892 | *maxburst = fls(*maxburst) - 2; | |
893 | else | |
894 | *maxburst = 0; | |
895 | } | |
896 | ||
897 | static int | |
898 | set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | |
899 | { | |
900 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
901 | ||
902 | /* Check if chan is configured for slave transfers */ |
903 | if (!chan->private) | |
904 | return -EINVAL; | |
905 | ||
906 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); | |
907 | ||
908 | convert_burst(&dwc->dma_sconfig.src_maxburst); | |
909 | convert_burst(&dwc->dma_sconfig.dst_maxburst); | |
910 | ||
911 | return 0; | |
912 | } | |
913 | ||
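
To show how this path is reached in practice, here is a hedged client-side sketch (not part of this driver): the raw maxburst values travel through dmaengine_slave_config() into set_runtime_config()/convert_burst() above, and the transfer is then prepared via the same device_prep_slave_sg hook this file registers. It assumes the platform has already attached a matching struct dw_dma_slave to chan->private, as dwc_prep_slave_sg() requires, and FIFO_ADDR is a made-up peripheral register address.

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Hypothetical peripheral FIFO address; replace with the real register. */
#define FIFO_ADDR	0xfff00040

static int start_tx(struct dma_chan *chan, struct device *dev,
		    void *buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= FIFO_ADDR,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,	/* convert_burst() turns this into 2 */
		.src_maxburst	= 8,
	};
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);	/* ends up in set_runtime_config() */
	if (ret)
		return ret;

	sg_init_one(&sg, buf, len);
	if (dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE) != 1)
		return -ENOMEM;

	/* Same hook dw_dmac registers as device_prep_slave_sg (context unused here) */
	desc = chan->device->device_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
						  DMA_PREP_INTERRUPT, NULL);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);		/* calls dwc_tx_submit() */
	dma_async_issue_pending(chan);
	return 0;
}
```
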
05827630 LW |
914 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
915 | unsigned long arg) | |
3bfb1d20 HS |
916 | { |
917 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
918 | struct dw_dma *dw = to_dw_dma(chan->device); | |
919 | struct dw_desc *desc, *_desc; | |
69cea5a0 | 920 | unsigned long flags; |
a7c57cf7 | 921 | u32 cfglo; |
3bfb1d20 HS |
922 | LIST_HEAD(list); |
923 | ||
a7c57cf7 LW |
924 | if (cmd == DMA_PAUSE) { |
925 | spin_lock_irqsave(&dwc->lock, flags); | |
c3635c78 | 926 | |
a7c57cf7 LW |
927 | cfglo = channel_readl(dwc, CFG_LO); |
928 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | |
929 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) | |
930 | cpu_relax(); | |
3bfb1d20 | 931 | |
a7c57cf7 LW |
932 | dwc->paused = true; |
933 | spin_unlock_irqrestore(&dwc->lock, flags); | |
934 | } else if (cmd == DMA_RESUME) { | |
935 | if (!dwc->paused) | |
936 | return 0; | |
3bfb1d20 | 937 | |
a7c57cf7 | 938 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 | 939 | |
a7c57cf7 LW |
940 | cfglo = channel_readl(dwc, CFG_LO); |
941 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | |
942 | dwc->paused = false; | |
3bfb1d20 | 943 | |
a7c57cf7 LW |
944 | spin_unlock_irqrestore(&dwc->lock, flags); |
945 | } else if (cmd == DMA_TERMINATE_ALL) { | |
946 | spin_lock_irqsave(&dwc->lock, flags); | |
3bfb1d20 | 947 | |
3f936207 | 948 | dwc_chan_disable(dw, dwc); |
a7c57cf7 LW |
949 | |
950 | dwc->paused = false; | |
951 | ||
952 | /* active_list entries will end up before queued entries */ | |
953 | list_splice_init(&dwc->queue, &list); | |
954 | list_splice_init(&dwc->active_list, &list); | |
955 | ||
956 | spin_unlock_irqrestore(&dwc->lock, flags); | |
957 | ||
958 | /* Flush all pending and queued descriptors */ | |
959 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | |
960 | dwc_descriptor_complete(dwc, desc, false); | |
327e6970 VK |
961 | } else if (cmd == DMA_SLAVE_CONFIG) { |
962 | return set_runtime_config(chan, (struct dma_slave_config *)arg); | |
963 | } else { | |
a7c57cf7 | 964 | return -ENXIO; |
327e6970 | 965 | } |
c3635c78 LW |
966 | |
967 | return 0; | |
3bfb1d20 HS |
968 | } |
969 | ||
970 | static enum dma_status | |
07934481 LW |
971 | dwc_tx_status(struct dma_chan *chan, |
972 | dma_cookie_t cookie, | |
973 | struct dma_tx_state *txstate) | |
3bfb1d20 HS |
974 | { |
975 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
96a2af41 | 976 | enum dma_status ret; |
3bfb1d20 | 977 | |
96a2af41 | 978 | ret = dma_cookie_status(chan, cookie, txstate); |
3bfb1d20 HS |
979 | if (ret != DMA_SUCCESS) { |
980 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | |
981 | ||
96a2af41 | 982 | ret = dma_cookie_status(chan, cookie, txstate); |
3bfb1d20 HS |
983 | } |
984 | ||
abf53902 | 985 | if (ret != DMA_SUCCESS) |
96a2af41 | 986 | dma_set_residue(txstate, dwc_first_active(dwc)->len); |
3bfb1d20 | 987 | |
a7c57cf7 LW |
988 | if (dwc->paused) |
989 | return DMA_PAUSED; | |
3bfb1d20 HS |
990 | |
991 | return ret; | |
992 | } | |
993 | ||
994 | static void dwc_issue_pending(struct dma_chan *chan) | |
995 | { | |
996 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
997 | ||
3bfb1d20 HS |
998 | if (!list_empty(&dwc->queue)) |
999 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | |
3bfb1d20 HS |
1000 | } |
1001 | ||
aa1e6f1a | 1002 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
3bfb1d20 HS |
1003 | { |
1004 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1005 | struct dw_dma *dw = to_dw_dma(chan->device); | |
1006 | struct dw_desc *desc; | |
3bfb1d20 | 1007 | int i; |
69cea5a0 | 1008 | unsigned long flags; |
3bfb1d20 | 1009 | |
2e4c364e | 1010 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
3bfb1d20 | 1011 | |
3bfb1d20 HS |
1012 | /* ASSERT: channel is idle */ |
1013 | if (dma_readl(dw, CH_EN) & dwc->mask) { | |
41d5e59c | 1014 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); |
3bfb1d20 HS |
1015 | return -EIO; |
1016 | } | |
1017 | ||
d3ee98cd | 1018 | dma_cookie_init(chan); |
3bfb1d20 | 1019 | |
3bfb1d20 HS |
1020 | /* |
1021 | * NOTE: some controllers may have additional features that we | |
1022 | * need to initialize here, like "scatter-gather" (which | |
1023 | * doesn't mean what you think it means), and status writeback. | |
1024 | */ | |
1025 | ||
69cea5a0 | 1026 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
1027 | i = dwc->descs_allocated; |
1028 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { | |
69cea5a0 | 1029 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
1030 | |
1031 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); | |
1032 | if (!desc) { | |
41d5e59c | 1033 | dev_info(chan2dev(chan), |
3bfb1d20 | 1034 | "only allocated %d descriptors\n", i); |
69cea5a0 | 1035 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
1036 | break; |
1037 | } | |
1038 | ||
e0bd0f8c | 1039 | INIT_LIST_HEAD(&desc->tx_list); |
3bfb1d20 HS |
1040 | dma_async_tx_descriptor_init(&desc->txd, chan); |
1041 | desc->txd.tx_submit = dwc_tx_submit; | |
1042 | desc->txd.flags = DMA_CTRL_ACK; | |
41d5e59c | 1043 | desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, |
3bfb1d20 HS |
1044 | sizeof(desc->lli), DMA_TO_DEVICE); |
1045 | dwc_desc_put(dwc, desc); | |
1046 | ||
69cea5a0 | 1047 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
1048 | i = ++dwc->descs_allocated; |
1049 | } | |
1050 | ||
69cea5a0 | 1051 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 | 1052 | |
2e4c364e | 1053 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); |
3bfb1d20 HS |
1054 | |
1055 | return i; | |
1056 | } | |
1057 | ||
1058 | static void dwc_free_chan_resources(struct dma_chan *chan) | |
1059 | { | |
1060 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1061 | struct dw_dma *dw = to_dw_dma(chan->device); | |
1062 | struct dw_desc *desc, *_desc; | |
69cea5a0 | 1063 | unsigned long flags; |
3bfb1d20 HS |
1064 | LIST_HEAD(list); |
1065 | ||
2e4c364e | 1066 | dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, |
3bfb1d20 HS |
1067 | dwc->descs_allocated); |
1068 | ||
1069 | /* ASSERT: channel is idle */ | |
1070 | BUG_ON(!list_empty(&dwc->active_list)); | |
1071 | BUG_ON(!list_empty(&dwc->queue)); | |
1072 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); | |
1073 | ||
69cea5a0 | 1074 | spin_lock_irqsave(&dwc->lock, flags); |
3bfb1d20 HS |
1075 | list_splice_init(&dwc->free_list, &list); |
1076 | dwc->descs_allocated = 0; | |
61e183f8 | 1077 | dwc->initialized = false; |
3bfb1d20 HS |
1078 | |
1079 | /* Disable interrupts */ | |
1080 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | |
3bfb1d20 HS |
1081 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); |
1082 | ||
69cea5a0 | 1083 | spin_unlock_irqrestore(&dwc->lock, flags); |
3bfb1d20 HS |
1084 | |
1085 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | |
41d5e59c DW |
1086 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
1087 | dma_unmap_single(chan2parent(chan), desc->txd.phys, | |
3bfb1d20 HS |
1088 | sizeof(desc->lli), DMA_TO_DEVICE); |
1089 | kfree(desc); | |
1090 | } | |
1091 | ||
2e4c364e | 1092 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
3bfb1d20 HS |
1093 | } |
1094 | ||
d9de4519 HCE |
1095 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
1096 | ||
1097 | /** | |
1098 | * dw_dma_cyclic_start - start the cyclic DMA transfer | |
1099 | * @chan: the DMA channel to start | |
1100 | * | |
1101 | * Must be called with soft interrupts disabled. Returns zero on success or | |
1102 | * -errno on failure. | |
1103 | */ | |
1104 | int dw_dma_cyclic_start(struct dma_chan *chan) | |
1105 | { | |
1106 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1107 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
69cea5a0 | 1108 | unsigned long flags; |
d9de4519 HCE |
1109 | |
1110 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { | |
1111 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); | |
1112 | return -ENODEV; | |
1113 | } | |
1114 | ||
69cea5a0 | 1115 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 HCE |
1116 | |
1117 | /* assert channel is idle */ | |
1118 | if (dma_readl(dw, CH_EN) & dwc->mask) { | |
1119 | dev_err(chan2dev(&dwc->chan), | |
1120 | "BUG: Attempted to start non-idle channel\n"); | |
1d455437 | 1121 | dwc_dump_chan_regs(dwc); |
69cea5a0 | 1122 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1123 | return -EBUSY; |
1124 | } | |
1125 | ||
d9de4519 HCE |
1126 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1127 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
1128 | ||
1129 | /* setup DMAC channel registers */ | |
1130 | channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); | |
1131 | channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | |
1132 | channel_writel(dwc, CTL_HI, 0); | |
1133 | ||
1134 | channel_set_bit(dw, CH_EN, dwc->mask); | |
1135 | ||
69cea5a0 | 1136 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1137 | |
1138 | return 0; | |
1139 | } | |
1140 | EXPORT_SYMBOL(dw_dma_cyclic_start); | |
1141 | ||
1142 | /** | |
1143 | * dw_dma_cyclic_stop - stop the cyclic DMA transfer | |
1144 | * @chan: the DMA channel to stop | |
1145 | * | |
1146 | * Must be called with soft interrupts disabled. | |
1147 | */ | |
1148 | void dw_dma_cyclic_stop(struct dma_chan *chan) | |
1149 | { | |
1150 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1151 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
69cea5a0 | 1152 | unsigned long flags; |
d9de4519 | 1153 | |
69cea5a0 | 1154 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 | 1155 | |
3f936207 | 1156 | dwc_chan_disable(dw, dwc); |
d9de4519 | 1157 | |
69cea5a0 | 1158 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1159 | } |
1160 | EXPORT_SYMBOL(dw_dma_cyclic_stop); | |
1161 | ||
1162 | /** | |
1163 | * dw_dma_cyclic_prep - prepare the cyclic DMA transfer | |
1164 | * @chan: the DMA channel to prepare | |
1165 | * @buf_addr: physical DMA address where the buffer starts | |
1166 | * @buf_len: total number of bytes for the entire buffer | |
1167 | * @period_len: number of bytes for each period | |
1168 | * @direction: transfer direction, to or from device | |
1169 | * | |
1170 | * Must be called before trying to start the transfer. Returns a valid struct | |
1171 | * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. | |
1172 | */ | |
1173 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |
1174 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, | |
db8196df | 1175 | enum dma_transfer_direction direction) |
d9de4519 HCE |
1176 | { |
1177 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
327e6970 | 1178 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
d9de4519 HCE |
1179 | struct dw_cyclic_desc *cdesc; |
1180 | struct dw_cyclic_desc *retval = NULL; | |
1181 | struct dw_desc *desc; | |
1182 | struct dw_desc *last = NULL; | |
d9de4519 HCE |
1183 | unsigned long was_cyclic; |
1184 | unsigned int reg_width; | |
1185 | unsigned int periods; | |
1186 | unsigned int i; | |
69cea5a0 | 1187 | unsigned long flags; |
d9de4519 | 1188 | |
69cea5a0 | 1189 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 | 1190 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { |
69cea5a0 | 1191 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1192 | dev_dbg(chan2dev(&dwc->chan), |
1193 | "queue and/or active list are not empty\n"); | |
1194 | return ERR_PTR(-EBUSY); | |
1195 | } | |
1196 | ||
1197 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
69cea5a0 | 1198 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1199 | if (was_cyclic) { |
1200 | dev_dbg(chan2dev(&dwc->chan), | |
1201 | "channel already prepared for cyclic DMA\n"); | |
1202 | return ERR_PTR(-EBUSY); | |
1203 | } | |
1204 | ||
1205 | retval = ERR_PTR(-EINVAL); | |
327e6970 VK |
1206 | |
1207 | if (direction == DMA_MEM_TO_DEV) | |
1208 | reg_width = __ffs(sconfig->dst_addr_width); | |
1209 | else | |
1210 | reg_width = __ffs(sconfig->src_addr_width); | |
1211 | ||
d9de4519 HCE |
1212 | periods = buf_len / period_len; |
1213 | ||
1214 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ | |
1215 | if (period_len > (DWC_MAX_COUNT << reg_width)) | |
1216 | goto out_err; | |
1217 | if (unlikely(period_len & ((1 << reg_width) - 1))) | |
1218 | goto out_err; | |
1219 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | |
1220 | goto out_err; | |
db8196df | 1221 | if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM)))) |
d9de4519 HCE |
1222 | goto out_err; |
1223 | ||
1224 | retval = ERR_PTR(-ENOMEM); | |
1225 | ||
1226 | if (periods > NR_DESCS_PER_CHANNEL) | |
1227 | goto out_err; | |
1228 | ||
1229 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); | |
1230 | if (!cdesc) | |
1231 | goto out_err; | |
1232 | ||
1233 | cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); | |
1234 | if (!cdesc->desc) | |
1235 | goto out_err_alloc; | |
1236 | ||
1237 | for (i = 0; i < periods; i++) { | |
1238 | desc = dwc_desc_get(dwc); | |
1239 | if (!desc) | |
1240 | goto out_err_desc_get; | |
1241 | ||
1242 | switch (direction) { | |
db8196df | 1243 | case DMA_MEM_TO_DEV: |
327e6970 | 1244 | desc->lli.dar = sconfig->dst_addr; |
d9de4519 | 1245 | desc->lli.sar = buf_addr + (period_len * i); |
327e6970 | 1246 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) |
d9de4519 HCE |
1247 | | DWC_CTLL_DST_WIDTH(reg_width) |
1248 | | DWC_CTLL_SRC_WIDTH(reg_width) | |
1249 | | DWC_CTLL_DST_FIX | |
1250 | | DWC_CTLL_SRC_INC | |
d9de4519 | 1251 | | DWC_CTLL_INT_EN); |
327e6970 VK |
1252 | |
1253 | desc->lli.ctllo |= sconfig->device_fc ? | |
1254 | DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | |
1255 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | |
1256 | ||
d9de4519 | 1257 | break; |
db8196df | 1258 | case DMA_DEV_TO_MEM: |
d9de4519 | 1259 | desc->lli.dar = buf_addr + (period_len * i); |
327e6970 VK |
1260 | desc->lli.sar = sconfig->src_addr; |
1261 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) | |
d9de4519 HCE |
1262 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1263 | | DWC_CTLL_DST_WIDTH(reg_width) | |
1264 | | DWC_CTLL_DST_INC | |
1265 | | DWC_CTLL_SRC_FIX | |
d9de4519 | 1266 | | DWC_CTLL_INT_EN); |
327e6970 VK |
1267 | |
1268 | desc->lli.ctllo |= sconfig->device_fc ? | |
1269 | DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | |
1270 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | |
1271 | ||
d9de4519 HCE |
1272 | break; |
1273 | default: | |
1274 | break; | |
1275 | } | |
1276 | ||
1277 | desc->lli.ctlhi = (period_len >> reg_width); | |
1278 | cdesc->desc[i] = desc; | |
1279 | ||
1280 | if (last) { | |
1281 | last->lli.llp = desc->txd.phys; | |
1282 | dma_sync_single_for_device(chan2parent(chan), | |
1283 | last->txd.phys, sizeof(last->lli), | |
1284 | DMA_TO_DEVICE); | |
1285 | } | |
1286 | ||
1287 | last = desc; | |
1288 | } | |
1289 | ||
1290 | /* let's make a cyclic list */ |
1291 | last->lli.llp = cdesc->desc[0]->txd.phys; | |
1292 | dma_sync_single_for_device(chan2parent(chan), last->txd.phys, | |
1293 | sizeof(last->lli), DMA_TO_DEVICE); | |
1294 | ||
2f45d613 AS |
1295 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " |
1296 | "period %zu periods %d\n", (unsigned long long)buf_addr, | |
1297 | buf_len, period_len, periods); | |
d9de4519 HCE |
1298 | |
1299 | cdesc->periods = periods; | |
1300 | dwc->cdesc = cdesc; | |
1301 | ||
1302 | return cdesc; | |
1303 | ||
1304 | out_err_desc_get: | |
1305 | while (i--) | |
1306 | dwc_desc_put(dwc, cdesc->desc[i]); | |
1307 | out_err_alloc: | |
1308 | kfree(cdesc); | |
1309 | out_err: | |
1310 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
1311 | return (struct dw_cyclic_desc *)retval; | |
1312 | } | |
1313 | EXPORT_SYMBOL(dw_dma_cyclic_prep); | |
1314 | ||
1315 | /** | |
1316 | * dw_dma_cyclic_free - free a prepared cyclic DMA transfer | |
1317 | * @chan: the DMA channel to free | |
1318 | */ | |
1319 | void dw_dma_cyclic_free(struct dma_chan *chan) | |
1320 | { | |
1321 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1322 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
1323 | struct dw_cyclic_desc *cdesc = dwc->cdesc; | |
1324 | int i; | |
69cea5a0 | 1325 | unsigned long flags; |
d9de4519 | 1326 | |
2e4c364e | 1327 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); |
d9de4519 HCE |
1328 | |
1329 | if (!cdesc) | |
1330 | return; | |
1331 | ||
69cea5a0 | 1332 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 | 1333 | |
3f936207 | 1334 | dwc_chan_disable(dw, dwc); |
d9de4519 | 1335 | |
d9de4519 HCE |
1336 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1337 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
1338 | ||
69cea5a0 | 1339 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1340 | |
1341 | for (i = 0; i < cdesc->periods; i++) | |
1342 | dwc_desc_put(dwc, cdesc->desc[i]); | |
1343 | ||
1344 | kfree(cdesc->desc); | |
1345 | kfree(cdesc); | |
1346 | ||
1347 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
1348 | } | |
1349 | EXPORT_SYMBOL(dw_dma_cyclic_free); | |
1350 | ||
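
For orientation, a hedged sketch of how a client (an audio or similar streaming driver) might drive the cyclic extension exported in this block. The channel, the DMA-mapped ring buffer and the slave configuration are assumed to be set up already; the header location and the callback names are illustrative only.

```c
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/dw_dmac.h>	/* assumed to declare dw_dma_cyclic_*() and struct dw_cyclic_desc */

static void period_done(void *arg)
{
	/* one period of the ring buffer has completed */
}

static int start_ring(struct dma_chan *chan, dma_addr_t buf,
		      size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_MEM_TO_DEV);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->period_callback = period_done;
	cdesc->period_callback_param = NULL;

	/* per the kernel-doc above, the caller must have softirqs disabled here */
	return dw_dma_cyclic_start(chan);
}

static void stop_ring(struct dma_chan *chan)
{
	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);
}
```
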
3bfb1d20 HS |
1351 | /*----------------------------------------------------------------------*/ |
1352 | ||
1353 | static void dw_dma_off(struct dw_dma *dw) | |
1354 | { | |
61e183f8 VK |
1355 | int i; |
1356 | ||
3bfb1d20 HS |
1357 | dma_writel(dw, CFG, 0); |
1358 | ||
1359 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | |
3bfb1d20 HS |
1360 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); |
1361 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | |
1362 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | |
1363 | ||
1364 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) | |
1365 | cpu_relax(); | |
61e183f8 VK |
1366 | |
1367 | for (i = 0; i < dw->dma.chancnt; i++) | |
1368 | dw->chan[i].initialized = false; | |
3bfb1d20 HS |
1369 | } |
1370 | ||
0272e93f | 1371 | static int __devinit dw_probe(struct platform_device *pdev) |
3bfb1d20 HS |
1372 | { |
1373 | struct dw_dma_platform_data *pdata; | |
1374 | struct resource *io; | |
1375 | struct dw_dma *dw; | |
1376 | size_t size; | |
1377 | int irq; | |
1378 | int err; | |
1379 | int i; | |
1380 | ||
6c618c9d | 1381 | pdata = dev_get_platdata(&pdev->dev); |
3bfb1d20 HS |
1382 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) |
1383 | return -EINVAL; | |
1384 | ||
1385 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1386 | if (!io) | |
1387 | return -EINVAL; | |
1388 | ||
1389 | irq = platform_get_irq(pdev, 0); | |
1390 | if (irq < 0) | |
1391 | return irq; | |
1392 | ||
1393 | size = sizeof(struct dw_dma); | |
1394 | size += pdata->nr_channels * sizeof(struct dw_dma_chan); | |
1395 | dw = kzalloc(size, GFP_KERNEL); | |
1396 | if (!dw) | |
1397 | return -ENOMEM; | |
1398 | ||
1399 | if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { | |
1400 | err = -EBUSY; | |
1401 | goto err_kfree; | |
1402 | } | |
1403 | ||
3bfb1d20 HS |
1404 | dw->regs = ioremap(io->start, DW_REGLEN); |
1405 | if (!dw->regs) { | |
1406 | err = -ENOMEM; | |
1407 | goto err_release_r; | |
1408 | } | |
1409 | ||
1410 | dw->clk = clk_get(&pdev->dev, "hclk"); | |
1411 | if (IS_ERR(dw->clk)) { | |
1412 | err = PTR_ERR(dw->clk); | |
1413 | goto err_clk; | |
1414 | } | |
3075528d | 1415 | clk_prepare_enable(dw->clk); |
3bfb1d20 | 1416 | |
11f932ec AS |
1417 | /* Calculate the all-channel mask before DMA setup */
1418 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | |
1419 | ||
3bfb1d20 HS |
1420 | /* force dma off, just in case */ |
1421 | dw_dma_off(dw); | |
1422 | ||
236b106f AS |
1423 | /* disable BLOCK interrupts as well */ |
1424 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | |
1425 | ||
3bfb1d20 HS |
1426 | err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); |
1427 | if (err) | |
1428 | goto err_irq; | |
1429 | ||
1430 | platform_set_drvdata(pdev, dw); | |
1431 | ||
1432 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | |
1433 | ||
3bfb1d20 | 1434 | INIT_LIST_HEAD(&dw->dma.channels); |
46389470 | 1435 | for (i = 0; i < pdata->nr_channels; i++) { |
3bfb1d20 HS |
1436 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1437 | ||
1438 | dwc->chan.device = &dw->dma; | |
d3ee98cd | 1439 | dma_cookie_init(&dwc->chan); |
b0c3130d VK |
1440 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1441 | list_add_tail(&dwc->chan.device_node, | |
1442 | &dw->dma.channels); | |
1443 | else | |
1444 | list_add(&dwc->chan.device_node, &dw->dma.channels); | |
3bfb1d20 | 1445 | |
93317e8e VK |
1446 | /* 7 is highest priority & 0 is lowest. */ |
1447 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | |
e8d9f875 | 1448 | dwc->priority = pdata->nr_channels - i - 1; |
93317e8e VK |
1449 | else |
1450 | dwc->priority = i; | |
1451 | ||
3bfb1d20 HS |
1452 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; |
1453 | spin_lock_init(&dwc->lock); | |
1454 | dwc->mask = 1 << i; | |
1455 | ||
1456 | INIT_LIST_HEAD(&dwc->active_list); | |
1457 | INIT_LIST_HEAD(&dwc->queue); | |
1458 | INIT_LIST_HEAD(&dwc->free_list); | |
1459 | ||
1460 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
1461 | } | |
1462 | ||
11f932ec | 1463 | /* Clear all interrupts on all channels. */ |
3bfb1d20 | 1464 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); |
236b106f | 1465 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); |
3bfb1d20 HS |
1466 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1467 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | |
1468 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | |
1469 | ||
3bfb1d20 HS |
1470 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); |
1471 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | |
95ea759e JI |
1472 | if (pdata->is_private) |
1473 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | |
3bfb1d20 HS |
1474 | dw->dma.dev = &pdev->dev; |
1475 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | |
1476 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | |
1477 | ||
1478 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; | |
1479 | ||
1480 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; | |
c3635c78 | 1481 | dw->dma.device_control = dwc_control; |
3bfb1d20 | 1482 | |
07934481 | 1483 | dw->dma.device_tx_status = dwc_tx_status; |
3bfb1d20 HS |
1484 | dw->dma.device_issue_pending = dwc_issue_pending; |
1485 | ||
1486 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | |
1487 | ||
1488 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", | |
46389470 | 1489 | dev_name(&pdev->dev), pdata->nr_channels); |
3bfb1d20 HS |
1490 | |
1491 | dma_async_device_register(&dw->dma); | |
1492 | ||
1493 | return 0; | |
1494 | ||
1495 | err_irq: | |
3075528d | 1496 | clk_disable_unprepare(dw->clk); |
3bfb1d20 HS |
1497 | clk_put(dw->clk); |
1498 | err_clk: | |
1499 | iounmap(dw->regs); | |
1500 | dw->regs = NULL; | |
1501 | err_release_r: | |
1502 | release_resource(io); | |
1503 | err_kfree: | |
1504 | kfree(dw); | |
1505 | return err; | |
1506 | } | |
1507 | ||
0272e93f | 1508 | static int __devexit dw_remove(struct platform_device *pdev) |
3bfb1d20 HS |
1509 | { |
1510 | struct dw_dma *dw = platform_get_drvdata(pdev); | |
1511 | struct dw_dma_chan *dwc, *_dwc; | |
1512 | struct resource *io; | |
1513 | ||
1514 | dw_dma_off(dw); | |
1515 | dma_async_device_unregister(&dw->dma); | |
1516 | ||
1517 | free_irq(platform_get_irq(pdev, 0), dw); | |
1518 | tasklet_kill(&dw->tasklet); | |
1519 | ||
1520 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | |
1521 | chan.device_node) { | |
1522 | list_del(&dwc->chan.device_node); | |
1523 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
1524 | } | |
1525 | ||
3075528d | 1526 | clk_disable_unprepare(dw->clk); |
3bfb1d20 HS |
1527 | clk_put(dw->clk); |
1528 | ||
1529 | iounmap(dw->regs); | |
1530 | dw->regs = NULL; | |
1531 | ||
1532 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1533 | release_mem_region(io->start, DW_REGLEN); | |
1534 | ||
1535 | kfree(dw); | |
1536 | ||
1537 | return 0; | |
1538 | } | |
1539 | ||
1540 | static void dw_shutdown(struct platform_device *pdev) | |
1541 | { | |
1542 | struct dw_dma *dw = platform_get_drvdata(pdev); | |
1543 | ||
1544 | dw_dma_off(platform_get_drvdata(pdev)); | |
3075528d | 1545 | clk_disable_unprepare(dw->clk); |
3bfb1d20 HS |
1546 | } |
1547 | ||
4a256b5f | 1548 | static int dw_suspend_noirq(struct device *dev) |
3bfb1d20 | 1549 | { |
4a256b5f | 1550 | struct platform_device *pdev = to_platform_device(dev); |
3bfb1d20 HS |
1551 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1552 | ||
1553 | dw_dma_off(platform_get_drvdata(pdev)); | |
3075528d | 1554 | clk_disable_unprepare(dw->clk); |
61e183f8 | 1555 | |
3bfb1d20 HS |
1556 | return 0; |
1557 | } | |
1558 | ||
4a256b5f | 1559 | static int dw_resume_noirq(struct device *dev) |
3bfb1d20 | 1560 | { |
4a256b5f | 1561 | struct platform_device *pdev = to_platform_device(dev); |
3bfb1d20 HS |
1562 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1563 | ||
3075528d | 1564 | clk_prepare_enable(dw->clk); |
3bfb1d20 HS |
1565 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1566 | return 0; | |
3bfb1d20 HS |
1567 | } |
1568 | ||
47145210 | 1569 | static const struct dev_pm_ops dw_dev_pm_ops = { |
4a256b5f MD |
1570 | .suspend_noirq = dw_suspend_noirq, |
1571 | .resume_noirq = dw_resume_noirq, | |
7414a1b8 RK |
1572 | .freeze_noirq = dw_suspend_noirq, |
1573 | .thaw_noirq = dw_resume_noirq, | |
1574 | .restore_noirq = dw_resume_noirq, | |
1575 | .poweroff_noirq = dw_suspend_noirq, | |
4a256b5f MD |
1576 | }; |
1577 | ||
d3f797d9 VK |
1578 | #ifdef CONFIG_OF |
1579 | static const struct of_device_id dw_dma_id_table[] = { | |
1580 | { .compatible = "snps,dma-spear1340" }, | |
1581 | {} | |
1582 | }; | |
1583 | MODULE_DEVICE_TABLE(of, dw_dma_id_table); | |
1584 | #endif | |
1585 | ||
3bfb1d20 | 1586 | static struct platform_driver dw_driver = { |
0272e93f | 1587 | .remove = __devexit_p(dw_remove), |
3bfb1d20 | 1588 | .shutdown = dw_shutdown, |
3bfb1d20 HS |
1589 | .driver = { |
1590 | .name = "dw_dmac", | |
4a256b5f | 1591 | .pm = &dw_dev_pm_ops, |
d3f797d9 | 1592 | .of_match_table = of_match_ptr(dw_dma_id_table), |
3bfb1d20 HS |
1593 | }, |
1594 | }; | |
1595 | ||
1596 | static int __init dw_init(void) | |
1597 | { | |
1598 | return platform_driver_probe(&dw_driver, dw_probe); | |
1599 | } | |
cb689a70 | 1600 | subsys_initcall(dw_init); |
3bfb1d20 HS |
1601 | |
1602 | static void __exit dw_exit(void) | |
1603 | { | |
1604 | platform_driver_unregister(&dw_driver); | |
1605 | } | |
1606 | module_exit(dw_exit); | |
1607 | ||
1608 | MODULE_LICENSE("GPL v2"); | |
1609 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); | |
e05503ef | 1610 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
10d8935f | 1611 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |