/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of_device.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the ATmel AHB DMA Controller
 * at_dma_ / atdma	: ATmel DMA controller entity related
 * atc_ / atchan	: ATmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * before control is handed to the client at usage time (in the prep
 * functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Submit queued descriptors, if any.
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now that it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}

/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}

/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len, enum dma_transfer_direction direction)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(AT_DMA_MEM_IF)
				| ATC_DIF(AT_DMA_PER_IF);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(AT_DMA_PER_IF)
				| ATC_DIF(AT_DMA_MEM_IF);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @context: transfer context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr,
					period_len, direction))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}

static int set_runtime_config(struct dma_chan *chan,
			      struct dma_slave_config *sconfig)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	/* Check that the channel is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}

static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, atc_first_active(atchan)->len);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!atc_chan_is_enabled(atchan)) {
		atc_advance_work(atchan);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* The channel should already be paused by the user;
	 * pause it here anyway if that has not been done */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");