/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <mach/dma.h>
/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3
/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_DONEINT		BIT(1)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030
#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32
struct ep93xx_dma_engine;
/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};
/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via %DMA_SLAVE_CONFIG before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As EP93xx DMA controller doesn't support real chained DMA descriptors we
 * will have slightly different scheme here: @active points to a head of
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0
	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};
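/*
 * Illustrative client-side sketch (an editorial example, not part of the
 * original driver): a slave channel is requested with a filter function
 * that leaves the configuration in chan->private, which is where this
 * driver expects to find &struct ep93xx_dma_data. The filter and the
 * EP93XX_DMA_SSP TX setup below are assumptions for illustration.
 *
 *	static bool my_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		chan->private = filter_param;
 *		return true;
 *	}
 *
 *	struct ep93xx_dma_data data = {
 *		.port		= EP93XX_DMA_SSP,
 *		.direction	= DMA_MEM_TO_DEV,
 *		.name		= "ssp-tx",
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &data);
 */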
/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};
static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}
/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}
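/*
 * Worked example of the flattening above (editorial illustration): a
 * submitted transaction whose head descriptor D0 carries D1 and D2 on its
 * tx_list
 *
 *	@queue:  D0 (tx_list: D1 -> D2)
 *
 * ends up on the channel as
 *
 *	@active: D0 -> D1 -> D2
 *
 * with D1 and D2 now carrying D0's callback and callback_param. Only D0
 * has a non-zero txd.cookie, which is how ep93xx_dma_advance_active()
 * below detects that the chain has wrapped around.
 */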
/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	if (list_empty(&edmac->active))
		return NULL;

	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}
/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances active descriptor to the next in the @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}
/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}
static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}
static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}
static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}
static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}
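/*
 * Worked submit sequence (editorial illustration): with descriptors D0 and
 * D1 on @active, m2p_hw_submit() above programs D0 into buffer 0 and
 * enables the STALL interrupt, then advances and programs D1 into buffer 1
 * and enables the NFB interrupt. Each NFB interrupt refills the
 * just-consumed buffer while the other buffer is being transferred.
 */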
static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to client so we just report the error here and continue as
		 * usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize	: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
	case M2P_INTERRUPT_STALL:
		/* Disable interrupts */
		control = readl(edmac->regs + M2P_CONTROL);
		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
		m2p_set_control(edmac, control);

		return INTERRUPT_DONE;

	case M2P_INTERRUPT_NFB:
		if (ep93xx_dma_advance_active(edmac))
			m2p_fill_desc(edmac);

		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_UNKNOWN;
}
/*
 * M2M DMA implementation
 *
 * For the M2M transfers we don't use NFB at all. This is because it simply
 * doesn't work well with memcpy transfers. When you submit both buffers it is
 * extremely unlikely that you get an NFB interrupt, but it instead reports
 * DONE interrupt and both buffers are already transferred which means that we
 * weren't able to update the next buffer.
 *
 * So for now we "simulate" NFB by just submitting buffer after buffer
 * without double buffering.
 */
static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get DONE interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}
static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
		return INTERRUPT_UNKNOWN;

	/* Clear the DONE bit */
	writel(0, edmac->regs + M2M_INTERRUPT);

	/* Disable interrupts and the channel */
	control = readl(edmac->regs + M2M_CONTROL);
	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
	writel(control, edmac->regs + M2M_CONTROL);

	/*
	 * Since we only get DONE interrupt we have to find out ourselves
	 * whether there still is something to process. So we try to advance
	 * the chain and see whether it succeeds.
	 */
	if (ep93xx_dma_advance_active(edmac)) {
		edmac->edma->hw_submit(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_DONE;
}
/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}
static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}
/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}
static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
	struct device *dev = desc->txd.chan->device->dev;

	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->src_addr, desc->size,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, desc->src_addr, desc->size,
				       DMA_TO_DEVICE);
	}
	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->dst_addr, desc->size,
					 DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, desc->dst_addr, desc->size,
				       DMA_FROM_DEVICE);
	}
}
static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			edmac->chan.completed_cookie = desc->txd.cookie;
			list_splice_init(&edmac->active, &list);
		}
		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		/*
		 * For the memcpy channels the API requires us to unmap the
		 * buffers unless requested otherwise.
		 */
		if (!edmac->chan.private)
			ep93xx_dma_unmap_buffers(desc);

		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}
/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute given descriptor on the hardware or if the hardware
 * is busy, queue the descriptor to be executed later on. Returns cookie which
 * can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);

	cookie = edmac->chan.cookie;
	if (++cookie < 0)
		cookie = 1;

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	edmac->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}
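/*
 * Clients do not call ep93xx_dma_tx_submit() directly; it is reached
 * through the generic dmaengine helpers. A minimal sketch (editorial
 * illustration):
 *
 *	cookie = dmaengine_submit(txd);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 */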
/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (data->direction != DMA_MEM_TO_DEV &&
				    data->direction != DMA_DEV_TO_MEM)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	edmac->chan.completed_cookie = 1;
	edmac->chan.cookie = 1;
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}
/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}
/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
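/*
 * Illustrative memcpy usage sketch (editorial, with assumed dst_dma/src_dma
 * mappings): a transfer longer than DMA_MAX_CHAN_BYTES is fine here since
 * the loop above splits it into hardware sized descriptors.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
 *						   3 * DMA_MAX_CHAN_BYTES,
 *						   DMA_CTRL_ACK);
 *	if (txd) {
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */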
/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t sg_len = sg_dma_len(sg);

		if (sg_len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 sg_len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}

		desc->size = sg_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting in a @period_len sized
 * buffers and calling callback once the period has been elapsed. Transfer
 * terminates only when client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
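/*
 * Illustrative cyclic usage sketch (editorial, e.g. an assumed audio
 * client): a buffer of four periods is cycled until the client terminates
 * the channel, which also clears EP93XX_DMA_IS_CYCLIC.
 *
 *	txd = chan->device->device_prep_dma_cyclic(chan, buf_dma,
 *						   4 * period, period,
 *						   DMA_MEM_TO_DEV);
 *	if (txd) {
 *		txd->callback = period_done;	(assumed callback)
 *		txd->callback_param = ctx;
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 *	...
 *	dmaengine_terminate_all(chan);
 */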
/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}
static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
				   struct dma_slave_config *config)
{
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}
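/*
 * Illustrative M2M slave configuration sketch (editorial; fifo_phys is an
 * assumed peripheral FIFO address): the address and width set here are
 * picked up as @runtime_addr and @runtime_ctrl by the prep functions above.
 *
 *	struct dma_slave_config conf = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *
 *	dmaengine_slave_config(chan, &conf);
 */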
/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Function returns %0 in case of success or negative
 * error in case of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct dma_slave_config *config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return ep93xx_dma_terminate_all(edmac);

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return ep93xx_dma_slave_config(edmac, config);

	default:
		break;
	}

	return -ENOSYS;
}
/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	dma_cookie_t last_used, last_completed;
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	last_used = chan->cookie;
	last_completed = chan->completed_cookie;
	spin_unlock_irqrestore(&edmac->lock, flags);

	ret = dma_async_is_complete(cookie, last_completed, last_used);
	dma_set_tx_state(state, last_completed, last_used, 0);

	return ret;
}
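/*
 * Illustrative polling sketch (editorial): the generic helper below ends
 * up in ep93xx_dma_tx_status() above.
 *
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	if (status == DMA_SUCCESS)
 *		...
 */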
/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}
static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_control = ep93xx_dma_control;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];

			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}
static struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");