/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"
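/*
 * This driver is layered on the shared virt-dma helpers: each physical
 * request line is wrapped in a virt_dma_chan, and every transfer
 * descriptor embeds a virt_dma_desc, so cookie and descriptor-list
 * management are handled by virt-dma rather than duplicated here.
 */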
struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint8_t periph_port;	/* Peripheral port */

	unsigned sglen;
	struct omap_sg sg[0];
};
static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};
static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}
static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}
static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}
static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}
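/*
 * Program the memory-side parameters for scatterlist entry @idx (the
 * memory address always post-increments through the EMIFF port) and
 * start the hardware channel.  The device-side parameters are set up
 * once per descriptor in omap_dma_start_desc().
 */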
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
	else
		omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

	omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
		d->sync_mode, c->dma_sig, d->sync_type);

	omap_start_dma(c->dma_ch);
}
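/*
 * Pull the next descriptor off the virtual channel's issued list (or
 * mark the channel idle if there is none), program the device-side
 * address as a constant, and kick off the first scatterlist entry.
 * Called with the channel's vc.lock held.
 */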
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_src_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
	else
		omap_set_dma_dest_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

	omap_dma_start_sg(c, d, 0);
}
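/*
 * Completion callback from the platform DMA code, called in interrupt
 * context.  For slave_sg transfers this advances to the next scatterlist
 * entry, or completes the descriptor and starts the next one; for cyclic
 * transfers it signals another completed period.
 */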
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}
static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}
static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}
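/*
 * Compute the number of bytes still to be transferred for a descriptor,
 * given the current hardware position @addr: nothing is counted until
 * the entry containing @addr is found, then the remainder of that entry
 * and all following entries contribute to the residue.
 */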
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}
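/*
 * Residue reporting: a descriptor still sitting on the issued list has
 * its full size outstanding; the currently active descriptor is sized
 * from the hardware's source/destination position; anything else has
 * already completed and reports zero.
 */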
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_get_dma_src_pos(c->dma_ch);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_get_dma_dst_pos(c->dma_ch);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
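/*
 * Issuing is deferred to a per-device tasklet: the channel is queued on
 * the device's pending list here, and the actual descriptor start
 * happens in omap_dma_sched(), outside the caller's context.
 */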
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		struct omap_dmadev *d = to_omap_dma_dev(chan->device);
		spin_lock(&d->lock);
		if (list_empty(&c->node))
			list_add_tail(&c->node, &d->pending);
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
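/*
 * Map a dmaengine slave_sg request onto the OMAP element/frame model:
 * the configured bus width becomes the element size (ES), the maxburst
 * becomes the elements-per-frame (EN), and each scatterlist entry is
 * described by its frame count (FN).
 */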
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_TIPB;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
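/*
 * Cyclic transfers use a single scatterlist entry: EN covers one period
 * and FN is the number of periods in the buffer.  The channel is linked
 * back onto itself so the hardware repeats indefinitely, with a frame
 * interrupt raised per period rather than a block interrupt at the end.
 */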
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_PACKET;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_MPUI;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	if (!c->cyclic) {
		c->cyclic = true;
		omap_dma_link_lch(c->dma_ch, c->dma_ch);
		omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
	}

	if (!cpu_class_is_omap1()) {
		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
	}

	return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
}
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}
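/*
 * Illustrative only: a client driver would typically set up the slave
 * configuration along these lines before preparing transfers (the
 * address and burst values below are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */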
static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_stop_dma() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		omap_stop_dma(c->dma_ch);
	}

	if (c->cyclic) {
		c->cyclic = false;
		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
static int omap_dma_pause(struct omap_chan *c)
{
	/* FIXME: not supported by platform private API */
	return -EINVAL;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* FIXME: not supported by platform private API */
	return -EINVAL;
}
static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}
static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
	kfree(od);
}
static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = kzalloc(sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
	} else {
		platform_set_drvdata(pdev, od);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}
static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}
static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name	= "omap-dma-engine",
		.owner	= THIS_MODULE,
	},
};
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
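/*
 * Illustrative only: clients typically request a channel through the
 * dmaengine core, passing the DMA request line they need ("sig" below
 * stands in for that line number):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	unsigned sig = MY_DMA_REQ_LINE;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */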
static struct platform_device *pdev;

static const struct platform_device_info omap_dma_dev_info = {
	.name = "omap-dma-engine",
	.id = -1,
	.dma_mask = DMA_BIT_MASK(32),
};
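/*
 * Register the driver and create the matching platform device ourselves;
 * if device creation fails, the driver registration is rolled back.
 * subsys_initcall makes the DMA engine available before client drivers
 * probe.
 */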
static int omap_dma_init(void)
{
	int rc = platform_driver_register(&omap_dma_driver);

	if (rc == 0) {
		pdev = platform_device_register_full(&omap_dma_dev_info);
		if (IS_ERR(pdev)) {
			platform_driver_unregister(&omap_dma_driver);
			rc = PTR_ERR(pdev);
		}
	}

	return rc;
}
subsys_initcall(omap_dma_init);
static void __exit omap_dma_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");