/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_INT_EN_CLR		0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR	0x15C

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5

/* xlen and dma_width registers are on a 4-byte boundary */
#define SIRFSOC_DMA_WORD_LEN		4

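/*
 * Register layout sketch (inferred from the register accesses below, not
 * from a datasheet): each channel owns a 0x10-byte window of per-channel
 * registers, e.g. for channel `cid`:
 *
 *	base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR	- word-aligned buffer address
 *	base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN	- line length, in words
 *	base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN	- line count minus one
 *	base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL	- mode/direction control
 *
 * while the per-channel width registers form a separate 4-byte-stride array
 * at base + SIRFSOC_DMA_WIDTH_0 + cid * 4. Shared registers (CH_INT,
 * INT_EN, CH_VALID, CH_LOOP_CTRL) carry one bit per channel, with loop
 * control using a second bit at (cid + 16).
 */
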
struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;		/* DMA direction */
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};

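/*
 * Worked example (illustrative numbers, using the word size above): an
 * interleaved transfer of 10 frames of 64 bytes each, with a 16-byte gap
 * between frames, maps to
 *
 *	xlen  = 64 / 4        = 16 words per line
 *	width = (64 + 16) / 4 = 20 words from line start to line start
 *	ylen  = 10 - 1        = 9
 *
 * matching the arithmetic in sirfsoc_dma_prep_interleaved() below.
 */
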
struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	bool				is_marco;
};

#define DRV_NAME	"sirfsoc_dma"

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * lock has been held by functions calling this, so we don't hold
	 * lock again
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active, &schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for cyclic channel, desc is always in active list */
			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);

			if (!sdesc || (sdesc && !sdesc->cyclic)) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);

				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA tasklet: defers completion processing out of hard-IRQ context */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco) {
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	} else {
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
	}

	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			| ((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_PAUSE:
		return sirfsoc_dma_pause_chan(schan);
	case DMA_RESUME:
		return sirfsoc_dma_resume_chan(schan);
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move descriptors to a temporary list so we can drop the lock */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;

	spin_lock_irqsave(&schan->lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * Number of chunks in a frame can only be 1 for prima2
	 * and ylen (number of frames - 1) must be at least 0
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;

err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * we only support cyclic transfers with 2 periods.
	 * If the X-length is set to 0, it would be the loop mode.
	 * The DMA address keeps increasing until reaching the end of a loop
	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
	 * the DMA address goes back to the beginning of this area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB. DMA controller generates interrupts twice in each loop:
	 * when the DMA address reaches the end of BUFA or the end of
	 * BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}

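/*
 * Usage sketch (illustrative, not part of this driver): the double-buffer
 * constraint above means a cyclic client must split its ring into exactly
 * two periods. Names are made-up placeholders, assuming the dmaengine
 * helpers of this kernel generation.
 *
 *	size_t period = 4096;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, 2 * period, period,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	tx->callback = period_done;	- invoked once per completed period
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */
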
/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);

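/*
 * Usage sketch (illustrative, not part of this driver): a slave driver can
 * claim a specific channel with the filter above. The global channel number
 * encodes both the DMAC instance and the channel, so channel 3 of
 * controller 1 is requested as 1 * SIRFSOC_DMA_CHANNELS + 3 = 19.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)19);
 */
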
static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_device_is_compatible(dn, "sirf,marco-dmac"))
		sdma->is_marco = true;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);

	return 0;
}

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{ .compatible = "sirf,marco-dmac", },
	{},
};

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= sirfsoc_dma_remove,
	.driver = {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

module_platform_driver(sirfsoc_dma_driver);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");