/*
 * EDMA3 support for DaVinci
 *
 * Copyright (C) 2006-2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/edma.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20
/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200
/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
#define EDMA_PARM	0x4000	/* 128 param entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100	/* 64 registers */

/* CCCFG register */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)

#define EDMA_MAX_DMACH		64
#define EDMA_MAX_PARAMENTRY	512
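/*
 * Worked example of the CCCFG decode done in edma_setup_from_hw() below
 * (the register value here is hypothetical, not from any particular SoC):
 *
 *	cccfg = 0x00234005
 *	GET_NUM_DMACH(cccfg)   = 5  ->  BIT(5 + 1) =  64 DMA channels
 *	GET_NUM_PAENTRY(cccfg) = 4  ->  BIT(4 + 4) = 256 PaRAM slots
 *	GET_NUM_EVQUE(cccfg)   = 3  ->  3 + 1      =   4 event queues/TCs
 *	GET_NUM_REGN(cccfg)    = 2  ->  BIT(2)     =   4 shadow regions
 */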
/*****************************************************************************/

static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
static inline unsigned int edma_read(unsigned ctlr, int offset)
{
	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
}
static inline void edma_write(unsigned ctlr, int offset, int val)
{
	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
}
static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ctlr, offset);

	val &= and;
	val |= or;
	edma_write(ctlr, offset, val);
}
static inline void edma_and(unsigned ctlr, int offset, unsigned and)
{
	unsigned val = edma_read(ctlr, offset);

	val &= and;
	edma_write(ctlr, offset, val);
}
static inline void edma_or(unsigned ctlr, int offset, unsigned or)
{
	unsigned val = edma_read(ctlr, offset);

	val |= or;
	edma_write(ctlr, offset, val);
}
static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
	return edma_read(ctlr, offset + (i << 2));
}
static inline void edma_write_array(unsigned ctlr, int offset, int i,
				    unsigned val)
{
	edma_write(ctlr, offset + (i << 2), val);
}
static inline void edma_modify_array(unsigned ctlr, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ctlr, offset + (i << 2), and, or);
}
static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
	edma_or(ctlr, offset + (i << 2), or);
}
static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
				  unsigned or)
{
	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
}
static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
				     unsigned val)
{
	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
}
static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset);
}
static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
						   int i)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}
static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}
static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
					    unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}
static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
					  int param_no)
{
	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}
static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
				   unsigned val)
{
	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}
static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
				    unsigned and, unsigned or)
{
	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}
static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
				 unsigned and)
{
	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}
static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
				unsigned or)
{
	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}
static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}
/*****************************************************************************/

/* actual number of DMA channels and slots on this silicon */
struct edma {
	/* how many dma resources of each type */
	unsigned	num_channels;
	unsigned	num_region;
	unsigned	num_slots;
	unsigned	num_tc;
	enum dma_event_q	default_queue;

	/* list of channels with no event trigger; terminated by "-1" */
	const s8	*noevent;

	struct edma_soc_info *info;

	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

	/* The edma_unused bit for each channel is clear unless
	 * it is not being used on this platform.  It is set by
	 * SOC-specific initialization code.
	 */
	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);

	unsigned	irq_res_start;
	unsigned	irq_res_end;

	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				 void *data);
		void *data;
	} intr_data[EDMA_MAX_DMACH];
};

static struct edma *edma_cc[EDMA_MAX_CC];
static int arch_num_cc;
/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

static const struct of_device_id edma_of_ids[] = {
	{ .compatible = "ti,edma3", },
	{}
};
/*****************************************************************************/

static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
			    enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = edma_cc[ctlr]->default_queue;

	queue_no &= 7;
	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
			  ~(0x7 << bit), queue_no << bit);
}
static void assign_priority_to_queue(unsigned ctlr, int queue_no,
				     int priority)
{
	int bit = queue_no * 4;

	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
		    ((priority & 0x7) << bit));
}
/**
 * map_dmach_param - Maps channel number to param entry number
 *
 * This maps the dma channel number to the param entry number.  In
 * other words, using the DMA channel mapping registers a param entry
 * can be mapped to any channel.
 *
 * Callers are responsible for ensuring the channel mapping logic is
 * included in that particular EDMA variant (Eg : dm646x).
 */
static void map_dmach_param(unsigned ctlr)
{
	int i;

	for (i = 0; i < EDMA_MAX_DMACH; i++)
		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
}
static void setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(lch);
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback)
		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
					 BIT(lch & 0x1f));

	edma_cc[ctlr]->intr_data[lch].callback = callback;
	edma_cc[ctlr]->intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
					 BIT(lch & 0x1f));
		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
					 BIT(lch & 0x1f));
	}
}
static int irq2ctlr(int irq)
{
	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
		return 0;
	else if (irq >= edma_cc[1]->irq_res_start &&
		 irq <= edma_cc[1]->irq_res_end)
		return 1;

	return -1;
}
/******************************************************************************
 *
 * DMA interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ctlr, SH_ICR, bank,
						 BIT(slot));
			if (edma_cc[ctlr]->intr_data[channel].callback)
				edma_cc[ctlr]->intr_data[channel].callback(
					EDMA_CTLR_CHAN(ctlr, channel),
					EDMA_DMA_COMPLETE,
					edma_cc[ctlr]->intr_data[channel].data);
		}
	} while (sh_ipr);

	edma_shadow0_write(ctlr, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	int i;
	int ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_ccerr_handler\n");

	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
	    (edma_read(ctlr, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;

		if (edma_read_array(ctlr, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ctlr, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(data, "EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;

				if (edma_read_array(ctlr, EDMA_EMR, j) &
							BIT(i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ctlr, EDMA_EMCR, j,
							 BIT(i));
					/* Clear any SER */
					edma_shadow0_write_array(ctlr, SH_SECR,
								 j, BIT(i));
					if (edma_cc[ctlr]->intr_data[k].
								callback)
						edma_cc[ctlr]->intr_data[k].
						callback(
							EDMA_CTLR_CHAN(ctlr, k),
							EDMA_DMA_CC_ERROR,
							edma_cc[ctlr]->intr_data
								[k].data);
				}
			}
		} else if (edma_read(ctlr, EDMA_QEMR)) {
			dev_dbg(data, "QEMR %02x\n",
				edma_read(ctlr, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_QEMCR, BIT(i));
					edma_shadow0_write(ctlr, SH_QSECR,
							   BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		} else if (edma_read(ctlr, EDMA_CCERR)) {
			dev_dbg(data, "CCERR %08x\n",
				edma_read(ctlr, EDMA_CCERR));
			/* FIXME:  CCERR.BIT(16) ignored!  much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		}
		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
		    (edma_read(ctlr, EDMA_CCERR) == 0))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ctlr, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	int i, count, ctlr;
	struct of_phandle_args dma_spec;

	if (dev->of_node) {
		count = of_property_count_strings(dev->of_node, "dma-names");
		if (count < 0)
			return 0;
		for (i = 0; i < count; i++) {
			if (of_parse_phandle_with_args(dev->of_node, "dmas",
						       "#dma-cells", i,
						       &dma_spec))
				continue;

			if (!of_match_node(edma_of_ids, dma_spec.np)) {
				of_node_put(dma_spec.np);
				continue;
			}

			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
				  edma_cc[0]->edma_unused);
			of_node_put(dma_spec.np);
		}
		return 0;
	}

	/* For non-OF case */
	for (i = 0; i < pdev->num_resources; i++) {
		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
		    (int)pdev->resource[i].start >= 0) {
			ctlr = EDMA_CTLR(pdev->resource[i].start);
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
				  edma_cc[ctlr]->edma_unused);
		}
	}

	return 0;
}
/*-----------------------------------------------------------------------*/

static bool unused_chan_list_done;

/* Resource alloc/free:  dma channels, parameter RAM slots */
/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel.  Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel.  When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining.  When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set.  Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
int edma_alloc_channel(int channel,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data,
		enum dma_event_q eventq_no)
{
	unsigned i, done = 0, ctlr = 0;
	int ret = 0;

	if (!unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
				       prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		unused_chan_list_done = true;
	}

	if (channel >= 0) {
		ctlr = EDMA_CTLR(channel);
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		for (i = 0; i < arch_num_cc; i++) {
			channel = 0;
			for (;;) {
				channel = find_next_bit(edma_cc[i]->edma_unused,
						edma_cc[i]->num_channels,
						channel);
				if (channel == edma_cc[i]->num_channels)
					break;
				if (!test_and_set_bit(channel,
						edma_cc[i]->edma_inuse)) {
					done = 1;
					ctlr = i;
					break;
				}
				channel++;
			}
			if (done)
				break;
		}
		if (!done)
			return -ENOMEM;
	} else if (channel >= edma_cc[ctlr]->num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
		    &dummy_paramset, PARM_SIZE);

	if (callback)
		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
				    callback, data);

	map_dmach_queue(ctlr, channel, eventq_no);

	return EDMA_CTLR_CHAN(ctlr, channel);
}
EXPORT_SYMBOL(edma_alloc_channel);
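/*
 * A minimal usage sketch (illustrative only, not part of this driver;
 * the callback and completion below are hypothetical):
 *
 *	static struct completion done;
 *
 *	static void xfer_done(unsigned channel, u16 ch_status, void *data)
 *	{
 *		if (ch_status == EDMA_DMA_COMPLETE)
 *			complete(data);
 *	}
 *
 *	int channel = edma_alloc_channel(EDMA_CHANNEL_ANY, xfer_done,
 *					 &done, EVENTQ_DEFAULT);
 *	if (channel < 0)
 *		return channel;
 */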
/**
 * edma_free_channel - deallocate DMA channel
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
void edma_free_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	setup_dma_interrupt(channel, NULL, NULL);
	/* REVISIT should probably take out of shadow region 0 */

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
		    &dummy_paramset, PARM_SIZE);
	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
	if (!edma_cc[ctlr])
		return -EINVAL;

	if (slot >= 0)
		slot = EDMA_CHAN_SLOT(slot);

	if (slot < 0) {
		slot = edma_cc[ctlr]->num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
						  edma_cc[ctlr]->num_slots,
						  slot);
			if (slot == edma_cc[ctlr]->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
				break;
		}
	} else if (slot < edma_cc[ctlr]->num_channels ||
		   slot >= edma_cc[ctlr]->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
		    &dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, slot);
}
EXPORT_SYMBOL(edma_alloc_slot);
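/*
 * Usage sketch (illustrative only): reload slots live above the
 * channel-mapped PaRAM region, so EDMA_SLOT_ANY on controller 0 picks
 * the first free one there:
 *
 *	int slot = edma_alloc_slot(0, EDMA_SLOT_ANY);
 *	if (slot < 0)
 *		return slot;
 */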
/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
void edma_free_slot(unsigned slot)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
	    slot >= edma_cc[ctlr]->num_slots)
		return;

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
		    &dummy_paramset, PARM_SIZE);
	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);
/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (i) -- read/write partial slots */

/**
 * edma_get_position - returns the current transfer point
 * @slot: parameter RAM slot being examined
 * @dst:  true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
dma_addr_t edma_get_position(unsigned slot, bool dst)
{
	u32 offs, ctlr = EDMA_CTLR(slot);

	slot = EDMA_CHAN_SLOT(slot);

	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ctlr, offs);
}
/**
 * edma_link - link one parameter RAM slot to another
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
void edma_link(unsigned from, unsigned to)
{
	unsigned ctlr_from, ctlr_to;

	ctlr_from = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);
	ctlr_to = EDMA_CTLR(to);
	to = EDMA_CHAN_SLOT(to);

	if (from >= edma_cc[ctlr_from]->num_slots)
		return;
	if (to >= edma_cc[ctlr_to]->num_slots)
		return;
	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
			 PARM_OFFSET(to));
}
EXPORT_SYMBOL(edma_link);
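/*
 * Linking sketch (illustrative; "channel" and "slot" are the values
 * returned by the hypothetical edma_alloc_channel()/edma_alloc_slot()
 * calls sketched above, and it assumes the usual identity mapping of a
 * channel to its own PaRAM set).  Linking the channel's slot to a
 * reload slot, and the reload slot to itself, gives a self-reloading
 * transfer:
 *
 *	edma_link(channel, slot);
 *	edma_link(slot, slot);
 */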
/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (ii) -- read/write whole parameter sets */

/**
 * edma_write_slot - write parameter RAM data for slot
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
		    PARM_SIZE);
}
EXPORT_SYMBOL(edma_write_slot);
/**
 * edma_read_slot - read parameter RAM data from slot
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save it as a template for later reuse.
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
		      PARM_SIZE);
}
EXPORT_SYMBOL(edma_read_slot);
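/*
 * PaRAM setup sketch (illustrative only; src_phys and dst_phys are
 * hypothetical DMA addresses).  A one-shot A-synchronized copy of
 * 32 bytes could be described field by field and written at once:
 *
 *	struct edmacc_param p;
 *
 *	edma_read_slot(slot, &p);
 *	p.src = src_phys;
 *	p.dst = dst_phys;
 *	p.a_b_cnt = (1 << 16) | 32;	(BCNT = 1, ACNT = 32 bytes)
 *	p.ccnt = 1;
 *	p.link_bcntrld = 0xffff;	(0xffff = no link, stop after this)
 *	edma_write_slot(slot, &p);
 */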
/*-----------------------------------------------------------------------*/

/* Various EDMA channel control operations */

/**
 * edma_pause - pause dma on a channel
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf.
 */
void edma_pause(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_pause);
/**
 * edma_resume - resumes dma on a paused channel
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
void edma_resume(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_resume);
int edma_trigger_channel(unsigned channel)
{
	unsigned ctlr;
	unsigned int mask;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);
	mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ctlr, SH_ESR, (channel >> 5), mask);

	pr_debug("EDMA: ESR%d %08x\n", (channel >> 5),
		 edma_shadow0_read_array(ctlr, SH_ESR, (channel >> 5)));
	return 0;
}
EXPORT_SYMBOL(edma_trigger_channel);
/**
 * edma_start - start dma on a channel
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
int edma_start(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
			pr_debug("EDMA: ESR%d %08x\n", j,
				 edma_shadow0_read_array(ctlr, SH_ESR, j));
			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		pr_debug("EDMA: ER%d %08x\n", j,
			 edma_shadow0_read_array(ctlr, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ctlr, EDMA_ECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
		pr_debug("EDMA: EER%d %08x\n", j,
			 edma_shadow0_read_array(ctlr, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(edma_start);
/**
 * edma_stop - stops dma on the channel passed
 * @channel: channel being deactivated
 *
 * When @channel is a channel, any active transfer is paused and
 * all pending hardware events are cleared.  The current transfer
 * may not be resumed, and the channel's Parameter RAM should be
 * reinitialized before being reused.
 */
void edma_stop(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);

		/* clear possibly pending completion interrupt */
		edma_shadow0_write_array(ctlr, SH_ICR, j, mask);

		pr_debug("EDMA: EER%d %08x\n", j,
			 edma_shadow0_read_array(ctlr, SH_EER, j));

		/* REVISIT:  consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
EXPORT_SYMBOL(edma_stop);
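/*
 * Channel lifecycle sketch (illustrative, continuing the hypothetical
 * "channel" from the edma_alloc_channel() example above):
 *
 *	edma_start(channel);	arm the hardware event, or fire once in
 *				software for unmapped channels
 *	edma_pause(channel);	temporarily mask further events
 *	edma_resume(channel);	unmask them again
 *	edma_stop(channel);	disarm and clear anything pending
 *	edma_free_channel(channel);
 */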
/******************************************************************************
 *
 * This cleans the PaRAM entry and brings the EDMA back to its initial
 * state if media has been removed before the EDMA has finished.  It is
 * useful for removable media.
 * Arguments:
 *	channel - channel no
 *
 * Return: zero on success, or corresponding error no on failure
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 *
 *****************************************************************************/

void edma_clean_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
			 edma_read_array(ctlr, EDMA_EMR, j));
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}
EXPORT_SYMBOL(edma_clean_channel);
/**
 * edma_assign_channel_eventq - move given channel to desired eventq
 * Arguments:
 *	channel - channel number
 *	eventq_no - queue to move the channel
 *
 * Can be used to move a channel to a selected event queue.
 */
void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = edma_cc[ctlr]->default_queue;
	if (eventq_no >= edma_cc[ctlr]->num_tc)
		return;

	map_dmach_queue(ctlr, channel, eventq_no);
}
EXPORT_SYMBOL(edma_assign_channel_eventq);
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma *edma_cc, int cc_id)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(cc_id, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	edma_cc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	edma_cc->num_channels = BIT(value + 1);

	value = GET_NUM_PAENTRY(cccfg);
	edma_cc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	edma_cc->num_tc = value + 1;

	dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id,
		cccfg);
	dev_dbg(dev, "num_region: %u\n", edma_cc->num_region);
	dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels);
	dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc);

	/* Nothing needs to be done if queue priority is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	queue_priority_map = devm_kzalloc(dev,
					  (edma_cc->num_tc + 1) *
						sizeof(*queue_priority_map),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < edma_cc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)

static int edma_xbar_event_map(struct device *dev, struct device_node *node,
			       struct edma_soc_info *pdata, size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
	return 0;
}
static int edma_of_parse_dt(struct device *dev,
			    struct device_node *node,
			    struct edma_soc_info *pdata)
{
	int ret = 0;
	int sz;
	struct property *prop;
	struct edma_rsv_info *rsv_info;

	rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
	if (!rsv_info)
		return -ENOMEM;
	pdata->rsv = rsv_info;

	prop = of_find_property(node, "ti,edma-xbar-event-map", &sz);
	if (prop)
		ret = edma_xbar_event_map(dev, node, pdata, sz);

	return ret;
}
static struct of_dma_filter_info edma_filter_info = {
	.filter_fn = edma_filter_fn,
};

static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     struct device_node *node)
{
	struct edma_soc_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = edma_of_parse_dt(dev, node, info);
	if (ret)
		return ERR_PTR(ret);

	dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap);
	dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap);
	of_dma_controller_register(dev->of_node, of_dma_simple_xlate,
				   &edma_filter_info);

	return info;
}

#else

static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     struct device_node *node)
{
	return ERR_PTR(-ENOSYS);
}

#endif
static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info	**info = pdev->dev.platform_data;
	struct edma_soc_info	*ninfo[EDMA_MAX_CC] = {NULL};
	s8			(*queue_priority_mapping)[2];
	int			i, j, off, ln, found = 0;
	int			status = -1;
	const s16		(*rsv_chans)[2];
	const s16		(*rsv_slots)[2];
	const s16		(*xbar_chans)[2];
	int			irq[EDMA_MAX_CC] = {0, 0};
	int			err_irq[EDMA_MAX_CC] = {0, 0};
	struct resource		*r[EDMA_MAX_CC] = {NULL};
	struct resource		res[EDMA_MAX_CC];
	char			res_name[10];
	struct device_node	*node = pdev->dev.of_node;
	struct device		*dev = &pdev->dev;
	int			ret;
	struct platform_device_info edma_dev_info = {
		.name = "edma-dma-engine",
		.dma_mask = DMA_BIT_MASK(32),
		.parent = &pdev->dev,
	};

	if (node) {
		/* Check if this is a second instance registered */
		if (arch_num_cc) {
			dev_err(dev, "only one EDMA instance is supported via DT\n");
			return -ENODEV;
		}

		ninfo[0] = edma_setup_info_from_dt(dev, node);
		if (IS_ERR(ninfo[0])) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(ninfo[0]);
		}

		info = ninfo;
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}
	for (j = 0; j < EDMA_MAX_CC; j++) {
		if (!info[j]) {
			if (!found)
				return -ENODEV;
			break;
		}
		if (node) {
			ret = of_address_to_resource(node, j, &res[j]);
			if (!ret)
				r[j] = &res[j];
		} else {
			sprintf(res_name, "edma_cc%d", j);
			r[j] = platform_get_resource_byname(pdev,
							    IORESOURCE_MEM,
							    res_name);
		}
		if (!r[j]) {
			if (found)
				break;
			else
				return -ENODEV;
		} else {
			found = 1;
		}

		edmacc_regs_base[j] = devm_ioremap_resource(&pdev->dev, r[j]);
		if (IS_ERR(edmacc_regs_base[j]))
			return PTR_ERR(edmacc_regs_base[j]);

		edma_cc[j] = devm_kzalloc(&pdev->dev, sizeof(struct edma),
					  GFP_KERNEL);
		if (!edma_cc[j])
			return -ENOMEM;

		/* Get eDMA3 configuration from IP */
		ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j);
		if (ret)
			return ret;

		edma_cc[j]->default_queue = info[j]->default_queue;

		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
			edmacc_regs_base[j]);

		for (i = 0; i < edma_cc[j]->num_slots; i++)
			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
				    &dummy_paramset, PARM_SIZE);

		/* Mark all channels as unused */
		memset(edma_cc[j]->edma_unused, 0xff,
		       sizeof(edma_cc[j]->edma_unused));

		if (info[j]->rsv) {

			/* Clear the reserved channels in unused list */
			rsv_chans = info[j]->rsv->rsv_chans;
			if (rsv_chans) {
				for (i = 0; rsv_chans[i][0] != -1; i++) {
					off = rsv_chans[i][0];
					ln = rsv_chans[i][1];
					clear_bits(off, ln,
						   edma_cc[j]->edma_unused);
				}
			}

			/* Set the reserved slots in inuse list */
			rsv_slots = info[j]->rsv->rsv_slots;
			if (rsv_slots) {
				for (i = 0; rsv_slots[i][0] != -1; i++) {
					off = rsv_slots[i][0];
					ln = rsv_slots[i][1];
					set_bits(off, ln,
						 edma_cc[j]->edma_inuse);
				}
			}
		}

		/* Clear the xbar mapped channels in unused list */
		xbar_chans = info[j]->xbar_chans;
		if (xbar_chans) {
			for (i = 0; xbar_chans[i][1] != -1; i++) {
				off = xbar_chans[i][1];
				clear_bits(off, 1,
					   edma_cc[j]->edma_unused);
			}
		}

		if (node) {
			irq[j] = irq_of_parse_and_map(node, 0);
			err_irq[j] = irq_of_parse_and_map(node, 2);
		} else {
			char irq_name[10];

			sprintf(irq_name, "edma%d", j);
			irq[j] = platform_get_irq_byname(pdev, irq_name);

			sprintf(irq_name, "edma%d_err", j);
			err_irq[j] = platform_get_irq_byname(pdev, irq_name);
		}
		edma_cc[j]->irq_res_start = irq[j];
		edma_cc[j]->irq_res_end = err_irq[j];

		status = devm_request_irq(dev, irq[j], dma_irq_handler, 0,
					  "edma", dev);
		if (status < 0) {
			dev_dbg(&pdev->dev,
				"devm_request_irq %d failed --> %d\n",
				irq[j], status);
			return status;
		}

		status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler,
					  0, "edma_error", dev);
		if (status < 0) {
			dev_dbg(&pdev->dev,
				"devm_request_irq %d failed --> %d\n",
				err_irq[j], status);
			return status;
		}

		for (i = 0; i < edma_cc[j]->num_channels; i++)
			map_dmach_queue(j, i, info[j]->default_queue);

		queue_priority_mapping = info[j]->queue_priority_mapping;

		/* Event queue priority mapping */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						 queue_priority_mapping[i][0],
						 queue_priority_mapping[i][1]);

		/* Map the channel to param entry if channel mapping logic
		 * exists
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		for (i = 0; i < edma_cc[j]->num_region; i++) {
			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
			edma_write_array(j, EDMA_QRAE, i, 0x0);
		}
		edma_cc[j]->info = info[j];
		arch_num_cc++;

		edma_dev_info.id = j;
		platform_device_register_full(&edma_dev_info);
	}

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int edma_pm_resume(struct device *dev)
{
	int i, j;

	for (j = 0; j < arch_num_cc; j++) {
		struct edma *cc = edma_cc[j];

		s8 (*queue_priority_mapping)[2];

		queue_priority_mapping = cc->info->queue_priority_mapping;

		/* Event queue priority mapping */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						 queue_priority_mapping[i][0],
						 queue_priority_mapping[i][1]);

		/*
		 * Map the channel to param entry if channel mapping logic
		 * exists
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		for (i = 0; i < cc->num_channels; i++) {
			if (test_bit(i, cc->edma_inuse)) {
				/* ensure access through shadow region 0 */
				edma_or_array2(j, EDMA_DRAE, 0, i >> 5,
					       BIT(i & 0x1f));

				setup_dma_interrupt(i,
						    cc->intr_data[i].callback,
						    cc->intr_data[i].data);
			}
		}
	}

	return 0;
}
#endif
static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)
};
static struct platform_driver edma_driver = {
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
	.probe = edma_probe,
};
static int __init edma_init(void)
{
	return platform_driver_probe(&edma_driver, edma_probe);
}
arch_initcall(edma_init);