/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/*
 * MPC512x and MPC8308 DMA driver. It supports
 * memory to memory data transfers (tested using dmatest module) and
 * data transfers between memory and peripheral I/O memory
 * by means of slave scatter/gather with these limitations:
 *  - chunked transfers (described by s/g lists with more than one item)
 *    are refused as long as proper support for scatter/gather is missing;
 *  - transfers on MPC8308 always start from software as this SoC appears
 *    not to have external request lines for peripheral flow control;
 *  - only peripheral devices with 4-byte FIFO access register are supported;
 *  - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
 *    source and destination addresses must be 4-byte aligned
 *    and transfer size must be aligned on (4 * maxburst) boundary;
 * a hypothetical slave configuration honouring these constraints is
 * sketched right after the includes below.
 */
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/random.h>

#include "dmaengine.h"
/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000
/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64
/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)
#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05
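/*
 * Note on the TSIZE values above: the TCD ssize/dsize fields hold log2
 * of the access size in bytes, e.g. MPC_DMA_TSIZE_16 = 0x04 because
 * 2^4 = 16. The 0x03 encoding (8 bytes) is not used by this driver.
 */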
/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	u8 dchpri[MPC_DMA_CHANNELS];
	/* DMA channels(0~63) priority */
};
/* Transfer Control Descriptor */
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Link channel for major loop complete */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};
struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
	int				will_access_peripheral;
};
struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t			src_per_paddr;
	u32				src_tcd_nunits;
	dma_addr_t			dst_per_paddr;
	u32				dst_tcd_nunits;

	/* Lock for this structure */
	spinlock_t			lock;
};
struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};
#define DRV_NAME	"mpc512x_dma"
/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}
/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
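/*
 * Both callers meet these requirements: mpc_dma_tx_submit() calls this
 * under mchan->lock only when the 'active' list is empty, and
 * mpc_dma_irq_process() calls it under mchan->lock right after splicing
 * 'active' onto 'completed', and only if 'queued' is non-empty.
 */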
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
						struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}
/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}
/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}
/* process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}
/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported following error(s) on channel %u:\n",
						MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}
/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}
/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
						&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}
/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}
/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}
/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		mdesc = list_first_entry_or_null(&mchan->free,
						struct mpc_dma_desc, node);
		if (!mdesc) {
			spin_unlock_irqrestore(&mchan->lock, iflags);
			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}

		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (!IS_ALIGNED(sg_dma_address(sg), 4))
			goto err_prep;

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);
			tcd->soff = 0;
			tcd->doff = 4;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;
			tcd->soff = 4;
			tcd->doff = 0;
		}

		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;

		len = sg_dma_len(sg);
		tcd->nbytes = tcd_nunits * 4;
		if (!IS_ALIGNED(len, tcd->nbytes))
			goto err_prep;

		iter = len / tcd->nbytes;
		if (iter >= 1 << 15) {
			/* len is too big */
			goto err_prep;
		}
		/* citer_linkch contains the high bits of iter */
		tcd->biter = iter & 0x1ff;
		tcd->biter_linkch = iter >> 9;
		tcd->citer = tcd->biter;
		tcd->citer_linkch = tcd->biter_linkch;

		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}
/* Process DMA_TERMINATE_ALL and DMA_SLAVE_CONFIG commands */
static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
							unsigned long arg)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma *mdma;
	struct dma_slave_config *cfg;
	unsigned long flags;

	mchan = dma_chan_to_mpc_dma_chan(chan);
	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* Disable channel requests */
		mdma = dma_chan_to_mpc_dma(chan);

		spin_lock_irqsave(&mchan->lock, flags);

		out_8(&mdma->regs->dmacerq, chan->chan_id);
		list_splice_tail_init(&mchan->prepared, &mchan->free);
		list_splice_tail_init(&mchan->queued, &mchan->free);
		list_splice_tail_init(&mchan->active, &mchan->free);

		spin_unlock_irqrestore(&mchan->lock, flags);

		return 0;

	case DMA_SLAVE_CONFIG:
		/*
		 * Software constraints:
		 *  - only transfers between a peripheral device and
		 *    memory are supported;
		 *  - only peripheral devices with 4-byte FIFO access register
		 *    are supported;
		 *  - minimal transfer chunk is 4 bytes and consequently
		 *    source and destination addresses must be 4-byte aligned
		 *    and transfer size must be aligned on (4 * maxburst)
		 *    boundary;
		 *  - during the transfer RAM address is being incremented by
		 *    the size of minimal transfer chunk;
		 *  - peripheral port's address is constant during the transfer.
		 */
		cfg = (void *)arg;

		if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
		    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
		    !IS_ALIGNED(cfg->src_addr, 4) ||
		    !IS_ALIGNED(cfg->dst_addr, 4)) {
			return -EINVAL;
		}

		spin_lock_irqsave(&mchan->lock, flags);

		mchan->src_per_paddr = cfg->src_addr;
		mchan->src_tcd_nunits = cfg->src_maxburst;
		mchan->dst_per_paddr = cfg->dst_addr;
		mchan->dst_tcd_nunits = cfg->dst_maxburst;

		/* Apply defaults */
		if (mchan->src_tcd_nunits == 0)
			mchan->src_tcd_nunits = 1;
		if (mchan->dst_tcd_nunits == 0)
			mchan->dst_tcd_nunits = 1;

		spin_unlock_irqrestore(&mchan->lock, flags);

		return 0;

	default:
		/* Unknown command */
		break;
	}

	return -ENXIO;
}
static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (mdma->irq2 == NO_IRQ) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	if (mdma->is_mpc8308)
		dma->chancnt = MPC8308_DMACHAN_MAX;
	else
		dma->chancnt = MPC512x_DMACHAN_MAX;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_control = mpc_dma_device_control;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	return retval;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);

	return retval;
}
static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}
static struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};
static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= mpc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= mpc_dma_match,
	},
};
);
1089 MODULE_LICENSE("GPL");
1090 MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");