/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QSECR	0x94

/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 shadow regions */
#define EDMA_PARM	0x4000	/* PaRAM entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100	/* 64 registers */
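
/*
 * Worked example of the PaRAM addressing above: each PaRAM set occupies
 * PARM_SIZE (0x20) bytes, so slot 3 lives at
 * PARM_OFFSET(3) = 0x4000 + (3 << 5) = 0x4060.
 */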
109 #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */
110 #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */
111 #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */
112 #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
113 #define CHMAP_EXIST BIT(24)
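
/*
 * Illustration with a hypothetical CCCFG value of 0x00233004, decoded
 * the way edma_setup_from_hw() does below:
 *	GET_NUM_DMACH   -> 4 -> BIT(4 + 1) = 32 DMA channels
 *	GET_NUM_PAENTRY -> 3 -> BIT(3 + 4) = 128 PaRAM slots
 *	GET_NUM_EVQUE   -> 3 -> 3 + 1     = 4 event queues/TCs
 *	GET_NUM_REGN    -> 2 -> BIT(2)    = 4 shadow regions
 */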

/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003

/* PaRAM slots are laid out like this */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __packed;

/* fields in edmacc_param.opt */
#define SAM		BIT(0)
#define DAM		BIT(1)
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)
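
/*
 * Example (illustrative, not from the original source): an opt word for
 * an AB-synchronized transfer that interrupts on completion and reports
 * on transfer-completion code 12 would be composed as:
 *
 *	opt = EDMA_TCC(12) | SYNCDIM | TCINTEN;
 */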

struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of pending transfers (programmed to EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: The number of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: The residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: Tracks the length of the current intermediate transfer,
	 * this is required to update the residue during intermediate transfer
	 * completion callback.
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	struct device			*dev;
	struct edma_soc_info		*info;
	void __iomem			*base;
	int				id;

	/* eDMA3 resource information */
	unsigned			num_channels;
	unsigned			num_region;
	unsigned			num_slots;
	unsigned			num_tc;
	enum dma_event_q		default_queue;

	bool				unused_chan_list_done;
	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	unsigned long *edma_inuse;

	/* The edma_unused bit for each channel is clear unless
	 * it is not being used on this platform. It is set up by a bit
	 * of SoC-specific initialization code.
	 */
	unsigned long *edma_unused;

	struct dma_device		dma_slave;
	struct edma_chan		*slave_chans;
	int				dummy_slot;
};

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

static const struct of_device_id edma_of_ids[] = {
	{ .compatible = "ti,edma3", },
	{}
};

static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
	return (unsigned int)__raw_readl(ecc->base + offset);
}

static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
	__raw_writel(val, ecc->base + offset);
}

static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	val |= or;
	edma_write(ecc, offset, val);
}

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val |= or;
	edma_write(ecc, offset, val);
}

static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
					   int i)
{
	return edma_read(ecc, offset + (i << 2));
}

static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
				    unsigned val)
{
	edma_write(ecc, offset + (i << 2), val);
}

static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ecc, offset + (i << 2), and, or);
}

static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
				 unsigned or)
{
	edma_or(ecc, offset + (i << 2), or);
}

static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
				  unsigned or)
{
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
				     int j, unsigned val)
{
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
						   int offset, int i)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
				      unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
					    int i, unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_parm_read(struct edma_cc *ecc, int offset,
					  int param_no)
{
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_parm_write(struct edma_cc *ecc, int offset,
				   int param_no, unsigned val)
{
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_parm_modify(struct edma_cc *ecc, int offset,
				    int param_no, unsigned and, unsigned or)
{
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_parm_and(struct edma_cc *ecc, int offset, int param_no,
				 unsigned and)
{
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_parm_or(struct edma_cc *ecc, int offset, int param_no,
				unsigned or)
{
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}

static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}
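
/*
 * Example: set_bits(4, 3, map) sets bits 6, 5 and 4 of @map, i.e. a
 * contiguous run of @len bits starting at @offset.
 */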

static void edma_map_dmach_to_queue(struct edma_cc *ecc, unsigned ch_no,
				    enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = ecc->default_queue;

	queue_no &= 7;
	edma_modify_array(ecc, EDMA_DMAQNUM, (ch_no >> 3), ~(0x7 << bit),
			  queue_no << bit);
}
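
/*
 * Example: for ch_no = 11, the mapping above lands in DMAQNUM[1]
 * (11 >> 3), in the 4-bit field at bit position (11 & 0x7) * 4 = 12.
 */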

static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
					  int priority)
{
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}

static void edma_direct_dmach_to_param_mapping(struct edma_cc *ecc)
{
	int i;

	for (i = 0; i < ecc->num_channels; i++)
		edma_write_array(ecc, EDMA_DCHMAP, i, (i << 5));
}

static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct edma_cc *ecc = data;
	int dma_req_min = EDMA_CTLR_CHAN(ecc->id, 0);
	int dma_req_max = dma_req_min + ecc->num_channels;
	int i, count;
	struct of_phandle_args dma_spec;

	if (dev->of_node) {
		struct platform_device *dma_pdev;

		count = of_property_count_strings(dev->of_node, "dma-names");
		if (count < 0)
			return 0;

		for (i = 0; i < count; i++) {
			if (of_parse_phandle_with_args(dev->of_node, "dmas",
						       "#dma-cells", i,
						       &dma_spec))
				continue;

			if (!of_match_node(edma_of_ids, dma_spec.np)) {
				of_node_put(dma_spec.np);
				continue;
			}

			dma_pdev = of_find_device_by_node(dma_spec.np);
			if (&dma_pdev->dev != ecc->dev)
				continue;

			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
				  ecc->edma_unused);
			of_node_put(dma_spec.np);
		}
		return 0;
	}

	/* For non-OF case */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *res = &pdev->resource[i];
		int dma_req;

		if (!(res->flags & IORESOURCE_DMA))
			continue;

		dma_req = (int)res->start;
		if (dma_req >= dma_req_min && dma_req < dma_req_max)
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
				  ecc->edma_unused);
	}

	return 0;
}

static void edma_setup_interrupt(struct edma_cc *ecc, unsigned lch, bool enable)
{
	lch = EDMA_CHAN_SLOT(lch);

	if (enable) {
		edma_shadow0_write_array(ecc, SH_ICR, lch >> 5,
					 BIT(lch & 0x1f));
		edma_shadow0_write_array(ecc, SH_IESR, lch >> 5,
					 BIT(lch & 0x1f));
	} else {
		edma_shadow0_write_array(ecc, SH_IECR, lch >> 5,
					 BIT(lch & 0x1f));
	}
}

/*
 * paRAM slot management functions
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}

static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
			   struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
}

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer. Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
	if (slot > 0)
		slot = EDMA_CHAN_SLOT(slot);
	if (slot < 0) {
		slot = ecc->num_channels;
		for (;;) {
			slot = find_next_zero_bit(ecc->edma_inuse,
						  ecc->num_slots,
						  slot);
			if (slot == ecc->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, ecc->edma_inuse))
				break;
		}
	} else if (slot < ecc->num_channels || slot >= ecc->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, ecc->edma_inuse)) {
		return -EBUSY;
	}

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
}

static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot < ecc->num_channels || slot >= ecc->num_slots)
		return;

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->edma_inuse);
}

/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)
		return;

	edma_parm_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
			 PARM_OFFSET(to));
}
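
/*
 * Example: edma_link(ecc, 5, 3) stores PARM_OFFSET(3) = 0x4060 in the
 * low 16 bits of slot 5's LINK/BCNTRLD word, so when the transfer
 * described by slot 5 completes, the PaRAM set at slot 3 is loaded in
 * its place.
 */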

/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
				    bool dst)
{
	u32 offs;

	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
}

/*-----------------------------------------------------------------------*/
/**
 * edma_start - start dma on a channel
 * @ecc: pointer to edma_cc struct
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software. (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
static int edma_start(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return -EINVAL;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, ecc->edma_unused)) {
			dev_dbg(ecc->dev, "ESR%d %08x\n", j,
				edma_shadow0_read_array(ecc, SH_ESR, j));
			edma_shadow0_write_array(ecc, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		dev_dbg(ecc->dev, "ER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
		dev_dbg(ecc->dev, "EER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}

/**
 * edma_stop - stops dma on the channel passed
 * @ecc: pointer to edma_cc struct
 * @channel: channel being deactivated
 *
 * Any active transfer is paused and all pending hardware events are cleared.
 * The current transfer may not be resumed, and the channel's Parameter RAM
 * should be reinitialized before being reused.
 */
static void edma_stop(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ecc, SH_EECR, j, mask);
		edma_shadow0_write_array(ecc, SH_ECR, j, mask);
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);

		/* clear possibly pending completion interrupt */
		edma_shadow0_write_array(ecc, SH_ICR, j, mask);

		dev_dbg(ecc->dev, "EER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_EER, j));

		/* REVISIT: consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}

/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ecc, SH_EECR, channel >> 5, mask);
	}
}

/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ecc, SH_EESR, channel >> 5, mask);
	}
}

static int edma_trigger_channel(struct edma_cc *ecc, unsigned channel)
{
	unsigned int mask;

	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return -EINVAL;
	}
	channel = EDMA_CHAN_SLOT(channel);
	mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);

	dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
		edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
	return 0;
}

static void edma_clean_channel(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		dev_dbg(ecc->dev, "EMR%d %08x\n", j,
			edma_read_array(ecc, EDMA_EMR, j));
		edma_shadow0_write_array(ecc, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}

/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @ecc: pointer to edma_cc struct
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel. Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel. When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining. When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set. Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
static int edma_alloc_channel(struct edma_cc *ecc, int channel,
			      enum dma_event_q eventq_no)
{
	unsigned done = 0;
	int ret = 0;

	if (!ecc->unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
				       prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		ecc->unused_chan_list_done = true;
	}

	if (channel >= 0) {
		if (ecc->id != EDMA_CTLR(channel)) {
			dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n",
				__func__, ecc->id, EDMA_CTLR(channel));
			return -EINVAL;
		}
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		channel = 0;
		for (;;) {
			channel = find_next_bit(ecc->edma_unused,
						ecc->num_channels, channel);
			if (channel == ecc->num_channels)
				break;
			if (!test_and_set_bit(channel, ecc->edma_inuse)) {
				done = 1;
				break;
			}
			channel++;
		}
		if (!done)
			return -ENOMEM;
	} else if (channel >= ecc->num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, ecc->edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(ecc, EDMA_CTLR_CHAN(ecc->id, channel));
	edma_write_slot(ecc, channel, &dummy_paramset);

	edma_setup_interrupt(ecc, EDMA_CTLR_CHAN(ecc->id, channel), true);

	edma_map_dmach_to_queue(ecc, channel, eventq_no);

	return EDMA_CTLR_CHAN(ecc->id, channel);
}

/**
 * edma_free_channel - deallocate DMA channel
 * @ecc: pointer to edma_cc struct
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
static void edma_free_channel(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= ecc->num_channels)
		return;

	edma_setup_interrupt(ecc, channel, false);
	/* REVISIT should probably take out of shadow region 0 */

	edma_write_slot(ecc, channel, &dummy_paramset);
	clear_bit(channel, ecc->edma_inuse);
}

/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_cc *ecc, unsigned channel,
				       enum dma_event_q eventq_no)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= ecc->num_channels)
		return;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)
		return;

	edma_map_dmach_to_queue(ecc, channel, eventq_no);
}

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is either the last set in a set of SG-list transactions
	 * then setup a link to the dummy slot, this results in all future
	 * events being absorbed and that's OK because we're done
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of MAX_NR_SG
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(ecc, echan->ch_num);
		edma_stop(ecc, echan->ch_num);
		edma_start(ecc, echan->ch_num);
		edma_trigger_channel(ecc, echan->ch_num);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(ecc, echan->ch_num);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(ecc, echan->ch_num);
	}
}
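
/*
 * Resulting PaRAM chain for one batch dispatched by edma_execute()
 * (sketch; the dummy link applies to the non-cyclic case):
 *
 *	slot[0] -> slot[1] -> ... -> slot[nslots - 1] -> dummy_slot
 *
 * Each arrow is an edma_link(); the dummy slot absorbs any further
 * events once the last set has completed.
 */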

static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_dma() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan->ecc, echan->ch_num);
		/* Move the cyclic channel back to default queue */
		if (echan->edesc->cyclic)
			edma_assign_channel_eventq(echan->ecc, echan->ch_num,
						   EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static int edma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}

static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan->ecc, echan->ch_num);
	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan->ecc, echan->ch_num);
	return 0;
}

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @epset: PaRAM set to initialize and setup.
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: How much is the dev_width
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    enum dma_slave_buswidth dev_width,
			    unsigned int dma_length,
			    enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of:
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: In A-sync transfer only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the best way adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K-1. This
		 * is assured as bcntrld = 0xffff at the end of function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * Only time when (bcntrld) auto reload is required is for
	 * A-sync case, and in this case, a requirement of reload value
	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
	 * and then later will be populated by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}
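
/*
 * Worked example for the A-synced math above (illustrative numbers):
 * acnt = 4 bytes and dma_length = 524288 bytes give
 * dma_length / acnt = 131072 words, so
 *	ccnt = 131072 / 65535 = 2
 *	bcnt = 131072 - 2 * 65535 = 2
 * bcnt is non-zero, so ccnt becomes 3: one frame of 2 words followed,
 * via the 0xffff bcnt reload, by two frames of 65535 words
 * (2 + 2 * 65535 = 131072).
 */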

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/* If this is the last in a current SG set of transactions,
		   enable interrupts so that next set is processed */
		if (!((i + 1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);

	if (unlikely(!echan || !len))
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = 1;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	/*
	 * Enable intermediate transfer chaining to re-trigger channel
	 * on completion of every TR, and enable transfer-completion
	 * interrupt on completion of the whole transfer.
	 */
	edesc->pset[0].param.opt |= ITCCHEN;
	edesc->pset[0].param.opt |= TCINTEN;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 i, echan->ch_num, echan->slot[i],
			 edesc->pset[i].param.opt,
			 edesc->pset[i].param.src,
			 edesc->pset[i].param.dst,
			 edesc->pset[i].param.a_b_cnt,
			 edesc->pset[i].param.ccnt,
			 edesc->pset[i].param.src_dst_bidx,
			 edesc->pset[i].param.src_dst_cidx,
			 edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[i].param.opt |= TCINTEN;
	}

	/* Place the cyclic channel to highest priority queue */
	edma_assign_channel_eventq(echan->ecc, echan->ch_num, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
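
/*
 * Example: for buf_len = 8192 and period_len = 2048 the loop above
 * programs nslots = 8192 / 2048 + 1 = 5 slots; the extra slot
 * duplicates pset[0] so edma_execute() can close the ring by linking
 * the last slot back to slot[1] without reprogramming the channel's
 * own slot.
 */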

static void edma_completion_handler(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc = echan->edesc;

	if (!edesc)
		return;

	spin_lock(&echan->vchan.lock);
	if (edesc->cyclic) {
		vchan_cyclic_callback(&edesc->vdesc);
		spin_unlock(&echan->vchan.lock);
		return;
	} else if (edesc->processed == edesc->pset_nr) {
		edesc->residue = 0;
		edma_stop(ecc, echan->ch_num);
		vchan_cookie_complete(&edesc->vdesc);
		echan->edesc = NULL;

		dev_dbg(dev, "Transfer completed on channel %d\n",
			echan->ch_num);
	} else {
		dev_dbg(dev, "Sub transfer completed on channel %d\n",
			echan->ch_num);

		edma_pause(ecc, echan->ch_num);

		/* Update statistics for tx_status */
		edesc->residue -= edesc->sg_len;
		edesc->residue_stat = edesc->residue;
		edesc->processed_stat = edesc->processed;
	}
	edma_execute(echan);

	spin_unlock(&echan->vchan.lock);
}

/* eDMA interrupt handler */
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);
		}
	} while (sh_ipr);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
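
/*
 * Example: bank = 1 and slot = 3 above decode to channel
 * (1 << 5) | 3 = 35, i.e. bit 3 of IPR1 signals a completion on
 * channel 35.
 */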

static void edma_error_handler(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edmacc_param p;

	if (!echan->edesc)
		return;

	spin_lock(&echan->vchan.lock);

	edma_read_slot(ecc, echan->slot[0], &p);
	/*
	 * Issue later based on missed flag which will be sure
	 * to happen as:
	 * (1) we finished transmitting an intermediate slot and
	 *     edma_execute is coming up.
	 * (2) or we finished current transfer and issue will
	 *     call edma_execute.
	 *
	 * Important note: issuing can be dangerous here and
	 * lead to some nasty recursion when we are in a NULL
	 * slot. So we avoid doing so and set the missed flag.
	 */
	if (p.a_b_cnt == 0 && p.ccnt == 0) {
		dev_dbg(dev, "Error on null slot, setting miss\n");
		echan->missed = 1;
	} else {
		/*
		 * The slot is already programmed but the event got
		 * missed, so it's safe to issue it here.
		 */
		dev_dbg(dev, "Missed event, TRIGGERING\n");
		edma_clean_channel(ecc, echan->ch_num);
		edma_stop(ecc, echan->ch_num);
		edma_start(ecc, echan->ch_num);
		edma_trigger_channel(ecc, echan->ch_num);
	}
	spin_unlock(&echan->vchan.lock);
}

/* eDMA error interrupt handler */
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int i;
	int ctlr;
	unsigned int cnt = 0;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");

	if ((edma_read_array(ecc, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ecc, EDMA_EMR, 1) == 0) &&
	    (edma_read(ecc, EDMA_QEMR) == 0) &&
	    (edma_read(ecc, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;

		if (edma_read_array(ecc, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ecc, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(ecc->dev, "EMR%d %08x\n", j,
				edma_read_array(ecc, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;

				if (edma_read_array(ecc, EDMA_EMR, j) &
				    BIT(i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ecc, EDMA_EMCR, j,
							 BIT(i));
					/* Clear any SER */
					edma_shadow0_write_array(ecc, SH_SECR,
								 j, BIT(i));
					edma_error_handler(&ecc->slave_chans[k]);
				}
			}
		} else if (edma_read(ecc, EDMA_QEMR)) {
			dev_dbg(ecc->dev, "QEMR %02x\n",
				edma_read(ecc, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ecc, EDMA_QEMR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ecc, EDMA_QEMCR, BIT(i));
					edma_shadow0_write(ecc, SH_QSECR,
							   BIT(i));
					/* NOTE: not reported!! */
				}
			}
		} else if (edma_read(ecc, EDMA_CCERR)) {
			dev_dbg(ecc->dev, "CCERR %08x\n",
				edma_read(ecc, EDMA_CCERR));
			/* FIXME: CCERR.BIT(16) ignored! much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(ecc, EDMA_CCERR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ecc, EDMA_CCERRCLR, BIT(i));
					/* NOTE: not reported!! */
				}
			}
		}
		if ((edma_read_array(ecc, EDMA_EMR, 0) == 0) &&
		    (edma_read_array(ecc, EDMA_EMR, 1) == 0) &&
		    (edma_read(ecc, EDMA_QEMR) == 0) &&
		    (edma_read(ecc, EDMA_CCERR) == 0))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ecc, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;

	a_ch_num = edma_alloc_channel(echan->ecc, echan->ch_num,
				      EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(echan->ecc, a_ch_num);
err_no_chan:
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	int i;

	/* Terminate transfers */
	edma_stop(echan->ecc, echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->ecc, echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ecc, echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(chan->device->dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	int i;

	/*
	 * We always read the dst/src position from the first RamPar
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}
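
/*
 * Example: with three processed psets of 4096 bytes each and pos
 * pointing 1024 bytes into the second pset, the walk above retires the
 * first pset (residue_stat -= 4096) and then returns
 * residue_stat - 1024 for the active one.
 */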

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static void __init edma_chan_init(struct edma_cc *ecc, struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_config = edma_slave_config;
	dma->device_pause = edma_dma_pause;
	dma->device_resume = edma_dma_resume;
	dma->device_terminate_all = edma_terminate_all;

	dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dma->dev = dev;

	/*
	 * code using dma memcpy must make sure alignment of
	 * length is at dma->copy_align boundary.
	 */
	dma->copy_align = DMAENGINE_ALIGN_4_BYTES;

	INIT_LIST_HEAD(&dma->channels);
}

static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);

	/* Nothing needs to be done if queue priority is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}

#if IS_ENABLED(CONFIG_OF)
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
			       size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(dev->of_node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
					 nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;

	return 0;
}

static int edma_of_parse_dt(struct device *dev, struct edma_soc_info *pdata)
{
	int ret = 0;
	struct property *prop;
	size_t sz;
	struct edma_rsv_info *rsv_info;

	rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
	if (!rsv_info)
		return -ENOMEM;
	pdata->rsv = rsv_info;

	prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", &sz);
	if (prop)
		ret = edma_xbar_event_map(dev, pdata, sz);

	return ret;
}

static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
{
	struct edma_soc_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = edma_of_parse_dt(dev, info);
	if (ret)
		return ERR_PTR(ret);

	return info;
}
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info *info = pdev->dev.platform_data;
	s8 (*queue_priority_mapping)[2];
	int i, off, ln;
	const s16 (*rsv_chans)[2];
	const s16 (*rsv_slots)[2];
	const s16 (*xbar_chans)[2];
	int irq;
	char *irq_name;
	struct resource *mem;
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc;
	int ret;

	if (node) {
		info = edma_setup_info_from_dt(dev);
		if (IS_ERR(info)) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);
		}
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->dev = dev;
	ecc->id = pdev->id;
	/* When booting with DT the pdev->id is -1 */
	if (ecc->id < 0)
		ecc->id = 0;

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
	if (!mem) {
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem) {
			dev_err(dev, "no mem resource?\n");
			return -ENODEV;
		}
	}
	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);
	if (ret)
		return ret;

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);
	if (!ecc->slave_chans)
		return -ENOMEM;

	ecc->edma_unused = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_channels),
					sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->edma_unused)
		return -ENOMEM;

	ecc->edma_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->edma_inuse)
		return -ENOMEM;

	ecc->default_queue = info->default_queue;

	for (i = 0; i < ecc->num_slots; i++)
		edma_write_slot(ecc, i, &dummy_paramset);

	/* Mark all channels as unused */
	memset(ecc->edma_unused, 0xff,
	       BITS_TO_LONGS(ecc->num_channels) * sizeof(unsigned long));

	if (info->rsv) {
		/* Clear the reserved channels in unused list */
		rsv_chans = info->rsv->rsv_chans;
		if (rsv_chans) {
			for (i = 0; rsv_chans[i][0] != -1; i++) {
				off = rsv_chans[i][0];
				ln = rsv_chans[i][1];
				clear_bits(off, ln, ecc->edma_unused);
			}
		}

		/* Set the reserved slots in inuse list */
		rsv_slots = info->rsv->rsv_slots;
		if (rsv_slots) {
			for (i = 0; rsv_slots[i][0] != -1; i++) {
				off = rsv_slots[i][0];
				ln = rsv_slots[i][1];
				set_bits(off, ln, ecc->edma_inuse);
			}
		}
	}

	/* Clear the xbar mapped channels in unused list */
	xbar_chans = info->xbar_chans;
	if (xbar_chans) {
		for (i = 0; xbar_chans[i][1] != -1; i++) {
			off = xbar_chans[i][1];
			clear_bits(off, 1, ecc->edma_unused);
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0,
				       irq_name, ecc);
		if (ret) {
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq,
				ret);
			return ret;
		}
	}

	for (i = 0; i < ecc->num_channels; i++)
		edma_map_dmach_to_queue(ecc, i, info->default_queue);

	queue_priority_mapping = info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	/* Map the channel to param entry if channel mapping logic exists */
	if (edma_read(ecc, EDMA_CCCFG) & CHMAP_EXIST)
		edma_direct_dmach_to_param_mapping(ecc);

	for (i = 0; i < ecc->num_region; i++) {
		edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
		edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
		edma_write_array(ecc, EDMA_QRAE, i, 0x0);
	}
	ecc->info = info;

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	if (node)
		of_dma_controller_register(node, of_dma_xlate_by_chan_id,
					   &ecc->dma_slave);

	dev_info(dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc, ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc, ecc->dummy_slot);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int edma_pm_resume(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	int i;
	s8 (*queue_priority_mapping)[2];

	queue_priority_mapping = ecc->info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	/* Map the channel to param entry if channel mapping logic exists */
	if (edma_read(ecc, EDMA_CCCFG) & CHMAP_EXIST)
		edma_direct_dmach_to_param_mapping(ecc);

	for (i = 0; i < ecc->num_channels; i++) {
		if (test_bit(i, ecc->edma_inuse)) {
			/* ensure access through shadow region 0 */
			edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
				       BIT(i & 0x1f));

			edma_setup_interrupt(ecc, EDMA_CTLR_CHAN(ecc->id, i),
					     true);
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)
};

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
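
/*
 * Sketch of client usage (hypothetical code, not part of this file):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int ch = EDMA_CTLR_CHAN(0, 12);
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch);
 */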

static int edma_init(void)
{
	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");