/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20
/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QSECR	0x94
/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY 0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 shadow regions */
#define EDMA_PARM	0x4000	/* PaRAM entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100	/* 64 registers */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)
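
/*
 * Worked example of the CCCFG decode (register value is hypothetical):
 * cccfg = 0x01224005 decodes, the way edma_setup_from_hw() does it, as
 *
 *	GET_NUM_DMACH(cccfg)   = 5 -> BIT(5 + 1) = 64 DMA channels
 *	GET_NUM_PAENTRY(cccfg) = 4 -> BIT(4 + 4) = 256 PaRAM slots
 *	GET_NUM_EVQUE(cccfg)   = 2 -> 2 + 1 = 3 event queues/TCs
 *	GET_NUM_REGN(cccfg)    = 2 -> BIT(2) = 4 shadow regions
 *	cccfg & CHMAP_EXIST   != 0 -> channel mapping logic is present
 */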
/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16
#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
/* PaRAM slots are laid out like this */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __packed;
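
/*
 * A PaRAM set describes a three-dimensional transfer: ACNT bytes per
 * array, BCNT arrays per frame and CCNT frames, acnt * bcnt * ccnt
 * bytes in total. A minimal sketch (all values hypothetical): a 64-byte
 * AB-synced copy with acnt = 4, bcnt = 8 and ccnt = 2 could be built as
 *
 *	struct edmacc_param p = dummy_paramset;
 *
 *	p.src = src_dma_addr;
 *	p.dst = dst_dma_addr;
 *	p.a_b_cnt = 8 << 16 | 4;	   bcnt in the high half, acnt low
 *	p.ccnt = 2;			   4 * 8 * 2 = 64 bytes total
 *	p.src_dst_bidx = 4 << 16 | 4;	   step acnt bytes per array
 *	p.src_dst_cidx = 32 << 16 | 32;	   step acnt * bcnt bytes per frame
 *
 * edma_config_pset() below performs this packing from the dmaengine
 * slave parameters.
 */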
/* fields in edmacc_param.opt */
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)
/* ch_status parameter of callback function: possible values */
#define EDMA_DMA_COMPLETE	1
#define EDMA_DMA_CC_ERROR	2
#define EDMA_DMA_TC1_ERROR	3
#define EDMA_DMA_TC2_ERROR	4
struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	bool				cyclic;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	unsigned int			processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of pending transfers (programmed to the EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: The number of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: The residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: Tracks the length of the current intermediate transfer,
	 * this is required to update the residue during intermediate transfer
	 * completion callback.
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};
struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};
struct edma_cc {
	struct device			*dev;
	struct edma_soc_info		*info;
	void __iomem			*base;
	int				id;

	/* eDMA3 resource information */
	unsigned			num_channels;
	unsigned			num_region;
	unsigned			num_slots;
	unsigned			num_tc;
	enum dma_event_q		default_queue;

	bool				unused_chan_list_done;
	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	unsigned long			*edma_inuse;

	/* The edma_unused bit for each channel is set unless the channel
	 * is being used on this platform; it is maintained by a bit of
	 * SoC-specific initialization code.
	 */
	unsigned long			*edma_unused;

	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				 void *data);
		void			*data;
	}				*intr_data;

	struct dma_device		dma_slave;
	struct edma_chan		*slave_chans;
	int				dummy_slot;
};
/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

static const struct of_device_id edma_of_ids[] = {
	{ .compatible = "ti,edma3", },
	{}
};
static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
	return (unsigned int)__raw_readl(ecc->base + offset);
}

static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
	__raw_writel(val, ecc->base + offset);
}

static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	val |= or;
	edma_write(ecc, offset, val);
}

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val |= or;
	edma_write(ecc, offset, val);
}

static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
					   int i)
{
	return edma_read(ecc, offset + (i << 2));
}
static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
				    unsigned val)
{
	edma_write(ecc, offset + (i << 2), val);
}

static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ecc, offset + (i << 2), and, or);
}

static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
				 unsigned or)
{
	edma_or(ecc, offset + (i << 2), or);
}

static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i,
				  int j, unsigned or)
{
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
				     int j, unsigned val)
{
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}
static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
						   int offset, int i)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
				      unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
					    int i, unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}
static inline unsigned int edma_parm_read(struct edma_cc *ecc, int offset,
					  int param_no)
{
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_parm_write(struct edma_cc *ecc, int offset,
				   int param_no, unsigned val)
{
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_parm_modify(struct edma_cc *ecc, int offset,
				    int param_no, unsigned and, unsigned or)
{
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_parm_and(struct edma_cc *ecc, int offset, int param_no,
				 unsigned and)
{
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_parm_or(struct edma_cc *ecc, int offset, int param_no,
				unsigned or)
{
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}
static void edma_map_dmach_to_queue(struct edma_cc *ecc, unsigned ch_no,
				    enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = ecc->default_queue;

	queue_no &= 7;
	edma_modify_array(ecc, EDMA_DMAQNUM, (ch_no >> 3), ~(0x7 << bit),
			  queue_no << bit);
}
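
/*
 * Example of the field math above (channel number hypothetical): for
 * ch_no = 10 the mapping lands in DMAQNUM1 (10 >> 3 == 1) in the 4-bit
 * field at bit 8 ((10 & 0x7) * 4 == 8); mapping to queue 2 clears that
 * field with ~(0x7 << 8) and writes 2 << 8.
 */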
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
					  int priority)
{
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}
static void edma_direct_dmach_to_param_mapping(struct edma_cc *ecc)
{
	int i;

	for (i = 0; i < ecc->num_channels; i++)
		edma_write_array(ecc, EDMA_DCHMAP, i, (i << 5));
}
static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct edma_cc *ecc = data;
	int dma_req_min = EDMA_CTLR_CHAN(ecc->id, 0);
	int dma_req_max = dma_req_min + ecc->num_channels;
	int i, count;
	struct of_phandle_args dma_spec;

	if (dev->of_node) {
		struct platform_device *dma_pdev;

		count = of_property_count_strings(dev->of_node, "dma-names");
		if (count < 0)
			return 0;

		for (i = 0; i < count; i++) {
			if (of_parse_phandle_with_args(dev->of_node, "dmas",
						       "#dma-cells", i,
						       &dma_spec))
				continue;

			if (!of_match_node(edma_of_ids, dma_spec.np)) {
				of_node_put(dma_spec.np);
				continue;
			}

			dma_pdev = of_find_device_by_node(dma_spec.np);
			if (&dma_pdev->dev != ecc->dev)
				continue;

			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
				  ecc->edma_unused);
			of_node_put(dma_spec.np);
		}
		return 0;
	}

	/* For non-OF case */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *res = &pdev->resource[i];
		int dma_req;

		if (!(res->flags & IORESOURCE_DMA))
			continue;

		dma_req = (int)res->start;
		if (dma_req >= dma_req_min && dma_req < dma_req_max)
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
				  ecc->edma_unused);
	}

	return 0;
}
static void edma_setup_interrupt(struct edma_cc *ecc, unsigned lch,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data)
{
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback)
		edma_shadow0_write_array(ecc, SH_IECR, lch >> 5,
					 BIT(lch & 0x1f));

	ecc->intr_data[lch].callback = callback;
	ecc->intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(ecc, SH_ICR, lch >> 5,
					 BIT(lch & 0x1f));
		edma_shadow0_write_array(ecc, SH_IESR, lch >> 5,
					 BIT(lch & 0x1f));
	}
}
/*
 * paRAM management functions
 */

/**
 * edma_write_slot - write parameter RAM data for slot
 * @ecc: pointer to edma_cc struct
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once. This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}
/**
 * edma_read_slot - read parameter RAM data from slot
 * @ecc: pointer to edma_cc struct
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save it as a template for later reuse.
 */
static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
			   struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
}
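
/*
 * A minimal usage sketch for the two slot accessors above (slot numbers
 * are hypothetical): read a programmed slot back as a template, tweak a
 * field and write it to another slot.
 *
 *	struct edmacc_param tmpl;
 *
 *	edma_read_slot(ecc, template_slot, &tmpl);
 *	tmpl.opt |= TCINTEN;
 *	edma_write_slot(ecc, target_slot, &tmpl);
 */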
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer. Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
	if (slot > 0)
		slot = EDMA_CHAN_SLOT(slot);
	if (slot < 0) {
		slot = ecc->num_channels;
		for (;;) {
			slot = find_next_zero_bit(ecc->edma_inuse,
						  ecc->num_slots, slot);
			if (slot == ecc->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, ecc->edma_inuse))
				break;
		}
	} else if (slot < ecc->num_channels || slot >= ecc->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, ecc->edma_inuse)) {
		return -EBUSY;
	}

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
}
/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot < ecc->num_channels || slot >= ecc->num_slots)
		return;

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->edma_inuse);
}
/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)
		return;

	edma_parm_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
			 PARM_OFFSET(to));
}
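
/*
 * Sketch of how linking is used in this driver (slot indices are
 * illustrative): a standalone slot is programmed with the next PaRAM
 * set and linked from a channel's slot, so the controller reloads it
 * automatically when the current set completes.
 *
 *	int next = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
 *
 *	if (next >= 0) {
 *		edma_write_slot(ecc, next, &second_pset);
 *		edma_link(ecc, echan->slot[0], next);
 *	}
 *
 * edma_execute() chains the per-channel slots exactly this way and
 * terminates the chain at ecc->dummy_slot.
 */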
/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
				    bool dst)
{
	u32 offs;

	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
}
/*-----------------------------------------------------------------------*/

/**
 * edma_start - start dma on a channel
 * @ecc: pointer to edma_cc struct
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software. (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
static int edma_start(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return -EINVAL;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, ecc->edma_unused)) {
			pr_debug("EDMA: ESR%d %08x\n", j,
				 edma_shadow0_read_array(ecc, SH_ESR, j));
			edma_shadow0_write_array(ecc, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		pr_debug("EDMA: ER%d %08x\n", j,
			 edma_shadow0_read_array(ecc, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
		pr_debug("EDMA: EER%d %08x\n", j,
			 edma_shadow0_read_array(ecc, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
/**
 * edma_stop - stops dma on the channel passed
 * @ecc: pointer to edma_cc struct
 * @channel: channel being deactivated
 *
 * Any active transfer on @channel is paused and all pending hardware
 * events are cleared. The current transfer may not be resumed, and the
 * channel's Parameter RAM should be reinitialized before being reused.
 */
static void edma_stop(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ecc, SH_EECR, j, mask);
		edma_shadow0_write_array(ecc, SH_ECR, j, mask);
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);

		/* clear possibly pending completion interrupt */
		edma_shadow0_write_array(ecc, SH_ICR, j, mask);

		pr_debug("EDMA: EER%d %08x\n", j,
			 edma_shadow0_read_array(ecc, SH_EER, j));

		/* REVISIT: consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
/**
 * edma_pause - pause dma on a channel
 * @ecc: pointer to edma_cc struct
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf.
 */
static void edma_pause(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ecc, SH_EECR, channel >> 5, mask);
	}
}
/**
 * edma_resume - resumes dma on a paused channel
 * @ecc: pointer to edma_cc struct
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
static void edma_resume(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ecc, SH_EESR, channel >> 5, mask);
	}
}
static int edma_trigger_channel(struct edma_cc *ecc, unsigned channel)
{
	unsigned int mask;

	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return -EINVAL;
	}
	channel = EDMA_CHAN_SLOT(channel);
	mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);

	pr_debug("EDMA: ESR%d %08x\n", (channel >> 5),
		 edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
	return 0;
}
/******************************************************************************
 *
 * It cleans the ParamEntry and brings the EDMA back to its initial state
 * if media has been removed before the EDMA has finished. It is useful
 * for removable media.
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 *
 *****************************************************************************/
static void edma_clean_channel(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
			 edma_read_array(ecc, EDMA_EMR, j));
		edma_shadow0_write_array(ecc, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}
/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @ecc: pointer to edma_cc struct
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 * Controller (TC) executes requests using this channel. Use
 * EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel. When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining. When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set. Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
static int edma_alloc_channel(struct edma_cc *ecc, int channel,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data,
		enum dma_event_q eventq_no)
{
	int ret = 0;

	if (!ecc->unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
				       prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		ecc->unused_chan_list_done = true;
	}

	if (channel >= 0) {
		if (ecc->id != EDMA_CTLR(channel)) {
			dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n",
				__func__, ecc->id, EDMA_CTLR(channel));
			return -EINVAL;
		}
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		channel = 0;
		for (;;) {
			channel = find_next_bit(ecc->edma_unused,
						ecc->num_channels, channel);
			if (channel == ecc->num_channels)
				return -ENOMEM;
			if (!test_and_set_bit(channel, ecc->edma_inuse))
				break;
			channel++;
		}
	} else if (channel >= ecc->num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, ecc->edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(ecc, EDMA_CTLR_CHAN(ecc->id, channel));
	edma_write_slot(ecc, channel, &dummy_paramset);

	if (callback)
		edma_setup_interrupt(ecc, EDMA_CTLR_CHAN(ecc->id, channel),
				     callback, data);

	edma_map_dmach_to_queue(ecc, channel, eventq_no);

	return EDMA_CTLR_CHAN(ecc->id, channel);
}
/**
 * edma_free_channel - deallocate DMA channel
 * @ecc: pointer to edma_cc struct
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
static void edma_free_channel(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= ecc->num_channels)
		return;

	edma_setup_interrupt(ecc, channel, NULL, NULL);
	/* REVISIT should probably take out of shadow region 0 */

	memcpy_toio(ecc->base + PARM_OFFSET(channel), &dummy_paramset,
		    PARM_SIZE);
	clear_bit(channel, ecc->edma_inuse);
}
/**
 * edma_assign_channel_eventq - move given channel to desired eventq
 * @ecc: pointer to edma_cc struct
 * @channel: channel number
 * @eventq_no: queue to move the channel
 *
 * Can be used to move a channel to a selected event queue.
 */
static void edma_assign_channel_eventq(struct edma_cc *ecc, unsigned channel,
				       enum dma_event_q eventq_no)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= ecc->num_channels)
		return;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)
		return;

	edma_map_dmach_to_queue(ecc, channel, eventq_no);
}
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		dev_dbg(ecc->dev, "IPR%d %08x\n", bank, sh_ipr);

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			if (ecc->intr_data[channel].callback)
				ecc->intr_data[channel].callback(
						EDMA_CTLR_CHAN(ctlr, channel),
						EDMA_DMA_COMPLETE,
						ecc->intr_data[channel].data);
		}
	} while (sh_ipr);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int i;
	int ctlr;
	unsigned int cnt = 0;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(ecc->dev, "dma_ccerr_handler\n");

	if ((edma_read_array(ecc, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ecc, EDMA_EMR, 1) == 0) &&
	    (edma_read(ecc, EDMA_QEMR) == 0) &&
	    (edma_read(ecc, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;

		if (edma_read_array(ecc, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ecc, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(ecc->dev, "EMR%d %08x\n", j,
				edma_read_array(ecc, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;

				if (edma_read_array(ecc, EDMA_EMR, j) &
				    BIT(i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ecc, EDMA_EMCR, j,
							 BIT(i));
					/* Clear any SER */
					edma_shadow0_write_array(ecc, SH_SECR,
								 j, BIT(i));
					if (ecc->intr_data[k].callback) {
						ecc->intr_data[k].callback(
						     EDMA_CTLR_CHAN(ctlr, k),
						     EDMA_DMA_CC_ERROR,
						     ecc->intr_data[k].data);
					}
				}
			}
		} else if (edma_read(ecc, EDMA_QEMR)) {
			dev_dbg(ecc->dev, "QEMR %02x\n",
				edma_read(ecc, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ecc, EDMA_QEMR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ecc, EDMA_QEMCR, BIT(i));
					edma_shadow0_write(ecc, SH_QSECR,
							   BIT(i));
					/* NOTE: not reported!! */
				}
			}
		} else if (edma_read(ecc, EDMA_CCERR)) {
			dev_dbg(ecc->dev, "CCERR %08x\n",
				edma_read(ecc, EDMA_CCERR));
			/* FIXME: CCERR.BIT(16) ignored! much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(ecc, EDMA_CCERR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ecc, EDMA_CCERRCLR, BIT(i));
					/* NOTE: not reported!! */
				}
			}
		}
		if ((edma_read_array(ecc, EDMA_EMR, 0) == 0) &&
		    (edma_read_array(ecc, EDMA_EMR, 1) == 0) &&
		    (edma_read(ecc, EDMA_QEMR) == 0) &&
		    (edma_read(ecc, EDMA_CCERR) == 0))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ecc, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(echan->vchan.chan.device->dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a series of SG-list transactions then
	 * set up a link to the dummy slot; this results in all future events
	 * being absorbed and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of MAX_NR_SG
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(ecc, echan->ch_num);
		edma_stop(ecc, echan->ch_num);
		edma_start(ecc, echan->ch_num);
		edma_trigger_channel(ecc, echan->ch_num);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(ecc, echan->ch_num);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(ecc, echan->ch_num);
	}
}
static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_terminate_all() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan->ecc, echan->ch_num);
		/* Move the cyclic channel back to default queue */
		if (echan->edesc->cyclic)
			edma_assign_channel_eventq(echan->ecc, echan->ch_num,
						   EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}
static int edma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
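
/*
 * Client-side sketch (peripheral address and widths hypothetical): a
 * slave driver hands its constraints to edma_slave_config() above via
 * the standard dmaengine call.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_dma_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */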
static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan->ecc, echan->ch_num);
	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan->ecc, echan->ch_num);
	return 0;
}
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and setup.
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: Width of the device data path (bus width)
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    enum dma_slave_buswidth dev_width,
			    unsigned int dma_length,
			    enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of:
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: In A-sync transfer only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the approach adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K-1. This
		 * is assured as bcntrld = 0xffff at the end of the function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * Only time when (bcntrld) auto reload is required is for
	 * A-sync case, and in this case, a requirement of reload value
	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
	 * and then later will be populated by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}
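
/*
 * Worked example of the counter math above (lengths hypothetical): with
 * dev_width = 4 bytes and maxburst = 1 (A-sync), a 1 MiB segment is
 * 262144 elements, so
 *
 *	ccnt = 262144 / (SZ_64K - 1) = 4
 *	bcnt = 262144 - 4 * (SZ_64K - 1) = 4
 *
 * bcnt is non-zero, so ccnt becomes 5: the first frame moves the 4
 * remainder elements and bcntrld reloads SZ_64K - 1 for the remaining
 * frames. With maxburst = 8 (AB-sync) the same segment gives
 * ccnt = 262144 / 8 = 32768 frames of 32 bytes, below the SZ_64K - 1
 * limit.
 */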
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/* If this is the last in a current SG set of transactions,
		   enable interrupts so that next set is processed */
		if (!((i + 1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);

	if (unlikely(!echan || !len))
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = 1;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	/*
	 * Enable intermediate transfer chaining to re-trigger channel
	 * on completion of every TR, and enable transfer-completion
	 * interrupt on completion of the whole transfer.
	 */
	edesc->pset[0].param.opt |= ITCCHEN;
	edesc->pset[0].param.opt |= TCINTEN;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
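
/*
 * Client-side sketch (buffers hypothetical): a memcpy through this
 * driver uses the generic dmaengine flow.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */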
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 i, echan->ch_num, echan->slot[i],
			 edesc->pset[i].param.opt,
			 edesc->pset[i].param.src,
			 edesc->pset[i].param.dst,
			 edesc->pset[i].param.a_b_cnt,
			 edesc->pset[i].param.ccnt,
			 edesc->pset[i].param.src_dst_bidx,
			 edesc->pset[i].param.src_dst_cidx,
			 edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[i].param.opt |= TCINTEN;
	}

	/* Place the cyclic channel to highest priority queue */
	edma_assign_channel_eventq(echan->ecc, echan->ch_num, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
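
/*
 * Client-side sketch for cyclic mode (audio-style numbers, all
 * hypothetical): buf_len must be a whole multiple of period_len and
 * buf_len / period_len + 1 slots must fit within MAX_NR_SG.
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf, 16 * PAGE_SIZE,
 *				       2 * PAGE_SIZE, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 */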
static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	struct edmacc_param p;

	edesc = echan->edesc;

	spin_lock(&echan->vchan.lock);
	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		if (edesc) {
			if (edesc->cyclic) {
				vchan_cyclic_callback(&edesc->vdesc);
			} else if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev,
					"Transfer completed on channel %d\n",
					ch_num);
				edesc->residue = 0;
				edma_stop(ecc, echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
				echan->edesc = NULL;
			} else {
				dev_dbg(dev,
					"Sub transfer completed on channel %d\n",
					ch_num);

				edma_pause(ecc, echan->ch_num);

				/* Update statistics for tx_status */
				edesc->residue -= edesc->sg_len;
				edesc->residue_stat = edesc->residue;
				edesc->processed_stat = edesc->processed;
			}
			edma_execute(echan);
		}
		break;
	case EDMA_DMA_CC_ERROR:
		edma_read_slot(ecc, echan->slot[0], &p);

		/*
		 * Issue later based on the missed flag, which is sure
		 * to happen, as:
		 * (1) we finished transmitting an intermediate slot and
		 * edma_execute is coming up.
		 * (2) or we finished current transfer and issue will
		 * call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error on null slot, setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Missed event, TRIGGERING\n");
			edma_clean_channel(ecc, echan->ch_num);
			edma_stop(ecc, echan->ch_num);
			edma_start(ecc, echan->ch_num);
			edma_trigger_channel(ecc, echan->ch_num);
		}
		break;
	default:
		break;
	}
	spin_unlock(&echan->vchan.lock);
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;

	a_ch_num = edma_alloc_channel(echan->ecc, echan->ch_num,
				      edma_callback, echan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(echan->ecc, a_ch_num);
err_no_chan:
	return ret;
}
/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ecc, echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->ecc, echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ecc, echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}
/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	int i;

	/*
	 * We always read the dst/src position from the first PaRAM
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}
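
/*
 * Worked example of the SG walk above (numbers hypothetical): for three
 * 4 KiB psets with residue_stat = 12 KiB and pos sitting 1 KiB into the
 * second pset, the first iteration falls outside pset[0] (it is marked
 * done and residue_stat drops to 8 KiB) and the second iteration
 * returns 8 KiB - 1 KiB = 7 KiB remaining.
 */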
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
static void __init edma_chan_init(struct edma_cc *ecc, struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &echans[i];

		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}
#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_config = edma_slave_config;
	dma->device_pause = edma_dma_pause;
	dma->device_resume = edma_dma_resume;
	dma->device_terminate_all = edma_terminate_all;

	dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dma->dev = dev;

	/*
	 * code using dma memcpy must make sure alignment of
	 * length is at dma->copy_align boundary.
	 */
	dma->copy_align = DMAENGINE_ALIGN_4_BYTES;

	INIT_LIST_HEAD(&dma->channels);
}
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);

	/* Nothing needs to be done if queue priority is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	queue_priority_map = devm_kzalloc(dev,
					  (ecc->num_tc + 1) * 2 * sizeof(s8),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}
#if IS_ENABLED(CONFIG_OF)
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
			       size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(dev->of_node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
					 nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
	return 0;
}
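
/*
 * Example of the mux math above (pair values hypothetical): an entry
 * { 12, 6 } routes crossbar event 12 to channel 6. Channel 6 lives in
 * byte 2 of the second 32-bit mux register, so shift = (6 & 0x03) << 3
 * = 16 and offset = 6 & ~3 = 4; that byte is replaced with 12.
 */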
static int edma_of_parse_dt(struct device *dev, struct edma_soc_info *pdata)
{
	int ret = 0;
	struct property *prop;
	size_t sz;
	struct edma_rsv_info *rsv_info;

	rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
	if (!rsv_info)
		return -ENOMEM;
	pdata->rsv = rsv_info;

	prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", &sz);
	if (prop)
		ret = edma_xbar_event_map(dev, pdata, sz);

	return ret;
}

static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
{
	struct edma_soc_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = edma_of_parse_dt(dev, info);
	if (ret)
		return ERR_PTR(ret);

	return info;
}
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif
static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info *info = pdev->dev.platform_data;
	s8 (*queue_priority_mapping)[2];
	int i, off, ln;
	const s16 (*rsv_chans)[2];
	const s16 (*rsv_slots)[2];
	const s16 (*xbar_chans)[2];
	int irq;
	char *irq_name;
	struct resource *mem;
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc;
	int ret;

	if (node) {
		info = edma_setup_info_from_dt(dev);
		if (IS_ERR(info)) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);
		}
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->dev = dev;
	ecc->id = pdev->id;
	/* When booting with DT the pdev->id is -1 */
	if (ecc->id < 0)
		ecc->id = 0;

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
	if (!mem) {
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem) {
			dev_err(dev, "no mem resource?\n");
			return -ENODEV;
		}
	}
	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);
	if (ret)
		return ret;

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);
	if (!ecc->slave_chans)
		return -ENOMEM;

	ecc->intr_data = devm_kcalloc(dev, ecc->num_channels,
				      sizeof(*ecc->intr_data), GFP_KERNEL);
	if (!ecc->intr_data)
		return -ENOMEM;

	ecc->edma_unused = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_channels),
					sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->edma_unused)
		return -ENOMEM;

	ecc->edma_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->edma_inuse)
		return -ENOMEM;

	ecc->default_queue = info->default_queue;

	for (i = 0; i < ecc->num_slots; i++)
		edma_write_slot(ecc, i, &dummy_paramset);

	/* Mark all channels as unused */
	memset(ecc->edma_unused, 0xff,
	       BITS_TO_LONGS(ecc->num_channels) * sizeof(unsigned long));

	if (info->rsv) {
		/* Clear the reserved channels in unused list */
		rsv_chans = info->rsv->rsv_chans;
		if (rsv_chans) {
			for (i = 0; rsv_chans[i][0] != -1; i++) {
				off = rsv_chans[i][0];
				ln = rsv_chans[i][1];
				clear_bits(off, ln, ecc->edma_unused);
			}
		}

		/* Set the reserved slots in inuse list */
		rsv_slots = info->rsv->rsv_slots;
		if (rsv_slots) {
			for (i = 0; rsv_slots[i][0] != -1; i++) {
				off = rsv_slots[i][0];
				ln = rsv_slots[i][1];
				set_bits(off, ln, ecc->edma_inuse);
			}
		}
	}

	/* Clear the xbar mapped channels in unused list */
	xbar_chans = info->xbar_chans;
	if (xbar_chans) {
		for (i = 0; xbar_chans[i][1] != -1; i++) {
			off = xbar_chans[i][1];
			clear_bits(off, 1, ecc->edma_unused);
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0,
				       irq_name, ecc);
		if (ret) {
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
	}

	for (i = 0; i < ecc->num_channels; i++)
		edma_map_dmach_to_queue(ecc, i, info->default_queue);

	queue_priority_mapping = info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	/* Map the channel to param entry if channel mapping logic exists */
	if (edma_read(ecc, EDMA_CCCFG) & CHMAP_EXIST)
		edma_direct_dmach_to_param_mapping(ecc);

	for (i = 0; i < ecc->num_region; i++) {
		edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
		edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
		edma_write_array(ecc, EDMA_QRAE, i, 0x0);
	}

	ecc->info = info;

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	if (node)
		of_dma_controller_register(node, of_dma_xlate_by_chan_id,
					   &ecc->dma_slave);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc, ecc->dummy_slot);
	return ret;
}
static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc, ecc->dummy_slot);

	return 0;
}
2392 static int edma_pm_resume(struct device
*dev
)
2394 struct edma_cc
*ecc
= dev_get_drvdata(dev
);
2396 s8 (*queue_priority_mapping
)[2];
2398 queue_priority_mapping
= ecc
->info
->queue_priority_mapping
;
2400 /* Event queue priority mapping */
2401 for (i
= 0; queue_priority_mapping
[i
][0] != -1; i
++)
2402 edma_assign_priority_to_queue(ecc
, queue_priority_mapping
[i
][0],
2403 queue_priority_mapping
[i
][1]);
2405 /* Map the channel to param entry if channel mapping logic */
2406 if (edma_read(ecc
, EDMA_CCCFG
) & CHMAP_EXIST
)
2407 edma_direct_dmach_to_param_mapping(ecc
);
2409 for (i
= 0; i
< ecc
->num_channels
; i
++) {
2410 if (test_bit(i
, ecc
->edma_inuse
)) {
2411 /* ensure access through shadow region 0 */
2412 edma_or_array2(ecc
, EDMA_DRAE
, 0, i
>> 5,
2415 edma_setup_interrupt(ecc
, EDMA_CTLR_CHAN(ecc
->id
, i
),
2416 ecc
->intr_data
[i
].callback
,
2417 ecc
->intr_data
[i
].data
);
2425 static const struct dev_pm_ops edma_pm_ops
= {
2426 SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL
, edma_pm_resume
)
static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
};
bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;

		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
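
/*
 * A minimal sketch of requesting a channel through this filter (the
 * channel number is hypothetical):
 *
 *	dma_cap_mask_t mask;
 *	unsigned ch_num = EDMA_CTLR_CHAN(0, 12);
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 */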
static int edma_init(void)
{
	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");