/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */
#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
		int _dms = __slave ? __slave->dst_master : 0;	\
		int _sms = __slave ? __slave->src_master : 1;	\
		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})
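/*
 * Illustrative example (not part of the original driver): for a plain
 * memcpy channel, chan->private is NULL, so __slave above is NULL and the
 * macro reduces to roughly
 *
 *	DWC_CTLL_DST_MSIZE(<default burst>)
 *	| DWC_CTLL_SRC_MSIZE(<default burst>)
 *	| DWC_CTLL_DMS(0) | DWC_CTLL_SMS(1)
 *
 * i.e. default-size bursts and master interface 0 for the destination,
 * 1 for the source. Slave channels instead take the burst sizes from the
 * channel's dma_slave_config and the masters from dw_dma_slave.
 */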
/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U
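/*
 * Illustrative arithmetic only: the limit above is a per-descriptor
 * *transfer* count, so the byte limit scales with the transfer width:
 *
 *	4095 transfers x 1 byte  (8-bit transfers)  = 4095 bytes
 *	4095 transfers x 4 bytes (32-bit transfers) = 16380 bytes
 *
 * which is where the 16380-byte figure in the comment above comes from.
 */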
/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64
/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */
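/*
 * Illustrative sketch (not part of the original driver, using helpers
 * defined later in this file) of the descriptor lifecycle that follows
 * from the comment above:
 *
 *	desc = dwc_desc_get(dwc);			// CPU-owned descriptor
 *	desc->lli.sar = ...; desc->lli.dar = ...;	// fill it from the CPU
 *	dma_sync_single_for_device(chan2parent(chan),
 *			desc->txd.phys, sizeof(desc->lli),
 *			DMA_TO_DEVICE);			// write back dcache lines
 *	channel_writel(dwc, LLP, desc->txd.phys);	// only now hand it to the DMAC
 */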
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}
/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback		callback = NULL;
	void				*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->chan.completed_cookie = txd->cookie;
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);

		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask & status_xfer) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}
	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done with fls(), which finds the most significant bit set.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
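/*
 * Illustrative usage sketch (not from an in-tree client; the FIFO address
 * and burst value are hypothetical): a slave client reaches the function
 * above through the generic dmaengine helper, e.g.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,	// hypothetical device FIFO
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,			// in transfers, not bytes
 *	};
 *	dmaengine_slave_config(chan, &cfg);	// -> dwc_control(DMA_SLAVE_CONFIG)
 *
 * The maxburst fields are then rewritten in place by convert_burst() above.
 */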
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	u32			cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	int			ret;

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = chan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
				dwc_first_active(dwc)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	return ret;
}
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	chan->completed_cookie = chan->cookie = 1;

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}
/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);
/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
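/*
 * Illustrative usage of the cyclic API above (not taken from an in-tree
 * client; my_period_cb and my_data are hypothetical names):
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_phys, buf_len, period_len,
 *			DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *
 *	cdesc->period_callback = my_period_cb;
 *	cdesc->period_callback_param = my_data;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */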
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}
static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->chan.completed_cookie = 1;
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), pdata->nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);

	return err;
}
static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;
	struct resource		*io;

	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	return 0;
}
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);

	return 0;
}
static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	return 0;
}
static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq	= dw_suspend_noirq,
	.resume_noirq	= dw_resume_noirq,
	.freeze_noirq	= dw_suspend_noirq,
	.thaw_noirq	= dw_resume_noirq,
	.restore_noirq	= dw_resume_noirq,
	.poweroff_noirq	= dw_suspend_noirq,
};
static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
	},
};
static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);
static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");