dmaengine: omap: Add support for pause/resume in cyclic dma mode
deliverable/linux.git: drivers/dma/omap-dma.c
1/*
2 * OMAP DMAengine support
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/dmaengine.h>
9#include <linux/dma-mapping.h>
10#include <linux/err.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/list.h>
14#include <linux/module.h>
15#include <linux/omap-dma.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19
20#include "virt-dma.h"
21#include <plat/dma.h>
22
23struct omap_dmadev {
24 struct dma_device ddev;
25 spinlock_t lock;
26 struct tasklet_struct task;
27 struct list_head pending;
28};
29
30struct omap_chan {
31 struct virt_dma_chan vc;
32 struct list_head node;
33
34 struct dma_slave_config cfg;
35 unsigned dma_sig;
36 bool cyclic;
37 bool paused;
38
39 int dma_ch;
40 struct omap_desc *desc;
41 unsigned sgidx;
42};
43
44struct omap_sg {
45 dma_addr_t addr;
46 uint32_t en; /* number of elements (24-bit) */
47 uint32_t fn; /* number of frames (16-bit) */
48};
49
50struct omap_desc {
51 struct virt_dma_desc vd;
52 enum dma_transfer_direction dir;
53 dma_addr_t dev_addr;
54
55 int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
56 uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */
57 uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */
58 uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */
59 uint8_t periph_port; /* Peripheral port */
60
61 unsigned sglen;
62 struct omap_sg sg[0];
63};
64
65static const unsigned es_bytes[] = {
66 [OMAP_DMA_DATA_TYPE_S8] = 1,
67 [OMAP_DMA_DATA_TYPE_S16] = 2,
68 [OMAP_DMA_DATA_TYPE_S32] = 4,
69};
70
71static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
72{
73 return container_of(d, struct omap_dmadev, ddev);
74}
75
76static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
77{
78 return container_of(c, struct omap_chan, vc.chan);
79}
80
81static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
82{
83 return container_of(t, struct omap_desc, vd.tx);
84}
85
86static void omap_dma_desc_free(struct virt_dma_desc *vd)
87{
88 kfree(container_of(vd, struct omap_desc, vd));
89}
90
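/*
 * A single omap_sg entry is expressed in the hardware's terms: element
 * size (ES), elements per frame (EN) and frame count (FN), so one entry
 * moves es_bytes[es] * en * fn bytes. Illustrative numbers, not taken from
 * this file: a 4096-byte buffer with 32-bit elements and a maxburst of 8
 * gives EN = 8 and FN = 4096 / (4 * 8) = 128. The memory end is programmed
 * with post-incrementing addresses on the EMIFF port; the device end keeps
 * a constant address (set up in omap_dma_start_desc() below).
 */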
91static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
92 unsigned idx)
93{
94 struct omap_sg *sg = d->sg + idx;
95
96 if (d->dir == DMA_DEV_TO_MEM)
97 omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
98 OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
99 else
100 omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
101 OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
102
103 omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
104 d->sync_mode, c->dma_sig, d->sync_type);
105
106 omap_start_dma(c->dma_ch);
107}
108
109static void omap_dma_start_desc(struct omap_chan *c)
110{
111 struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
112 struct omap_desc *d;
113
114 if (!vd) {
115 c->desc = NULL;
116 return;
117 }
118
119 list_del(&vd->node);
120
121 c->desc = d = to_omap_dma_desc(&vd->tx);
122 c->sgidx = 0;
123
124 if (d->dir == DMA_DEV_TO_MEM)
125 omap_set_dma_src_params(c->dma_ch, d->periph_port,
126 OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
127 else
128 omap_set_dma_dest_params(c->dma_ch, d->periph_port,
129 OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
130
131 omap_dma_start_sg(c, d, 0);
132}
133
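/*
 * Interrupt path, roughly: for a non-cyclic transfer the callback either
 * starts the next sg entry or, once the list is exhausted, completes the
 * cookie and kicks off the next queued descriptor. For a cyclic transfer
 * the channel is linked to itself and only the frame interrupt is left
 * enabled (see omap_dma_prep_dma_cyclic()), so each elapsed period simply
 * ends in vchan_cyclic_callback() and the client's callback runs later
 * from the virt-dma tasklet.
 */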
134static void omap_dma_callback(int ch, u16 status, void *data)
135{
136 struct omap_chan *c = data;
137 struct omap_desc *d;
138 unsigned long flags;
139
140 spin_lock_irqsave(&c->vc.lock, flags);
141 d = c->desc;
142 if (d) {
143 if (!c->cyclic) {
144 if (++c->sgidx < d->sglen) {
145 omap_dma_start_sg(c, d, c->sgidx);
146 } else {
147 omap_dma_start_desc(c);
148 vchan_cookie_complete(&d->vd);
149 }
150 } else {
151 vchan_cyclic_callback(&d->vd);
152 }
153 }
154 spin_unlock_irqrestore(&c->vc.lock, flags);
155}
156
157/*
158 * This callback schedules all pending channels. We could be more
159 * clever here by postponing allocation of the real DMA channels to
160 * this point, and freeing them when our virtual channel becomes idle.
161 *
162 * We would then need to deal with 'all channels in-use'
163 */
164static void omap_dma_sched(unsigned long data)
165{
166 struct omap_dmadev *d = (struct omap_dmadev *)data;
167 LIST_HEAD(head);
168
169 spin_lock_irq(&d->lock);
170 list_splice_tail_init(&d->pending, &head);
171 spin_unlock_irq(&d->lock);
172
173 while (!list_empty(&head)) {
174 struct omap_chan *c = list_first_entry(&head,
175 struct omap_chan, node);
176
177 spin_lock_irq(&c->vc.lock);
178 list_del_init(&c->node);
179 omap_dma_start_desc(c);
180 spin_unlock_irq(&c->vc.lock);
181 }
182}
183
184static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
185{
186 struct omap_chan *c = to_omap_dma_chan(chan);
187
188 dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
189
190 return omap_request_dma(c->dma_sig, "DMA engine",
191 omap_dma_callback, c, &c->dma_ch);
192}
193
194static void omap_dma_free_chan_resources(struct dma_chan *chan)
195{
196 struct omap_chan *c = to_omap_dma_chan(chan);
197
198 vchan_free_chan_resources(&c->vc);
199 omap_free_dma(c->dma_ch);
200
201 dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
202}
203
204static size_t omap_dma_sg_size(struct omap_sg *sg)
205{
206 return sg->en * sg->fn;
207}
208
209static size_t omap_dma_desc_size(struct omap_desc *d)
210{
211 unsigned i;
212 size_t size;
213
214 for (size = i = 0; i < d->sglen; i++)
215 size += omap_dma_sg_size(&d->sg[i]);
216
217 return size * es_bytes[d->es];
218}
219
220static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
221{
222 unsigned i;
223 size_t size, es_size = es_bytes[d->es];
224
225 for (size = i = 0; i < d->sglen; i++) {
226 size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
227
228 if (size)
229 size += this_size;
230 else if (addr >= d->sg[i].addr &&
231 addr < d->sg[i].addr + this_size)
232 size += d->sg[i].addr + this_size - addr;
233 }
234 return size;
235}
236
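/*
 * Residue reporting, with illustrative numbers that are not taken from
 * this file: a descriptor still sitting on the queue reports its full
 * size, sum(en * fn) * es_bytes. For the descriptor currently in flight,
 * omap_dma_desc_size_pos() counts only the bytes from the current hardware
 * position to the end of the descriptor: with two 512-byte sg entries and
 * the position 128 bytes into the first one, the residue is
 * (512 - 128) + 512 = 896 bytes.
 */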
237static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
238 dma_cookie_t cookie, struct dma_tx_state *txstate)
239{
240 struct omap_chan *c = to_omap_dma_chan(chan);
241 struct virt_dma_desc *vd;
242 enum dma_status ret;
243 unsigned long flags;
244
245 ret = dma_cookie_status(chan, cookie, txstate);
246 if (ret == DMA_SUCCESS || !txstate)
247 return ret;
248
249 spin_lock_irqsave(&c->vc.lock, flags);
250 vd = vchan_find_desc(&c->vc, cookie);
251 if (vd) {
252 txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
253 } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
254 struct omap_desc *d = c->desc;
255 dma_addr_t pos;
256
257 if (d->dir == DMA_MEM_TO_DEV)
258 pos = omap_get_dma_src_pos(c->dma_ch);
259 else if (d->dir == DMA_DEV_TO_MEM)
260 pos = omap_get_dma_dst_pos(c->dma_ch);
261 else
262 pos = 0;
263
264 txstate->residue = omap_dma_desc_size_pos(d, pos);
265 } else {
266 txstate->residue = 0;
267 }
268 spin_unlock_irqrestore(&c->vc.lock, flags);
269
270 return ret;
271}
272
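/*
 * issue_pending does not start the hardware directly; it only puts the
 * channel on the device's pending list under the device lock and schedules
 * the tasklet, which then takes each channel's vc.lock and calls
 * omap_dma_start_desc() (see the comment above omap_dma_sched()).
 */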
273static void omap_dma_issue_pending(struct dma_chan *chan)
274{
275 struct omap_chan *c = to_omap_dma_chan(chan);
276 unsigned long flags;
277
278 spin_lock_irqsave(&c->vc.lock, flags);
279 if (vchan_issue_pending(&c->vc) && !c->desc) {
280 struct omap_dmadev *d = to_omap_dma_dev(chan->device);
281 spin_lock(&d->lock);
282 if (list_empty(&c->node))
283 list_add_tail(&c->node, &d->pending);
284 spin_unlock(&d->lock);
285 tasklet_schedule(&d->task);
286 }
287 spin_unlock_irqrestore(&c->vc.lock, flags);
288}
289
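/*
 * Minimal client-side sketch for the slave_sg path; this is not part of
 * the driver, and "dev_fifo", "sgl", "nents" and "my_done" are illustrative
 * assumptions:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= dev_fifo,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (tx) {
 *		tx->callback = my_done;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */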
290static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
291 struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
292 enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
293{
294 struct omap_chan *c = to_omap_dma_chan(chan);
295 enum dma_slave_buswidth dev_width;
296 struct scatterlist *sgent;
297 struct omap_desc *d;
298 dma_addr_t dev_addr;
299 unsigned i, j = 0, es, en, frame_bytes, sync_type;
300 u32 burst;
301
302 if (dir == DMA_DEV_TO_MEM) {
303 dev_addr = c->cfg.src_addr;
304 dev_width = c->cfg.src_addr_width;
305 burst = c->cfg.src_maxburst;
306 sync_type = OMAP_DMA_SRC_SYNC;
307 } else if (dir == DMA_MEM_TO_DEV) {
308 dev_addr = c->cfg.dst_addr;
309 dev_width = c->cfg.dst_addr_width;
310 burst = c->cfg.dst_maxburst;
311 sync_type = OMAP_DMA_DST_SYNC;
312 } else {
313 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
314 return NULL;
315 }
316
317 /* Bus width translates to the element size (ES) */
318 switch (dev_width) {
319 case DMA_SLAVE_BUSWIDTH_1_BYTE:
320 es = OMAP_DMA_DATA_TYPE_S8;
321 break;
322 case DMA_SLAVE_BUSWIDTH_2_BYTES:
323 es = OMAP_DMA_DATA_TYPE_S16;
324 break;
325 case DMA_SLAVE_BUSWIDTH_4_BYTES:
326 es = OMAP_DMA_DATA_TYPE_S32;
327 break;
328 default: /* not reached */
329 return NULL;
330 }
331
332 /* Now allocate and setup the descriptor. */
333 d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
334 if (!d)
335 return NULL;
336
337 d->dir = dir;
338 d->dev_addr = dev_addr;
339 d->es = es;
340 d->sync_mode = OMAP_DMA_SYNC_FRAME;
341 d->sync_type = sync_type;
342 d->periph_port = OMAP_DMA_PORT_TIPB;
343
344 /*
345 * Build our scatterlist entries: each contains the address,
346 * the number of elements (EN) in each frame, and the number of
347 * frames (FN). Number of bytes for this entry = ES * EN * FN.
348 *
349 * Burst size translates to number of elements with frame sync.
350 * Note: DMA engine defines burst to be the number of dev-width
351 * transfers.
352 */
353 en = burst;
354 frame_bytes = es_bytes[es] * en;
355 for_each_sg(sgl, sgent, sglen, i) {
356 d->sg[j].addr = sg_dma_address(sgent);
357 d->sg[j].en = en;
358 d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
359 j++;
360 }
361
362 d->sglen = j;
363
364 return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
365}
366
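/*
 * Minimal client-side sketch for the cyclic path; not part of the driver,
 * buffer and period sizes are illustrative, and the five-argument
 * dmaengine_prep_dma_cyclic() wrapper is assumed to match the
 * device_prep_dma_cyclic prototype used here. An audio-style user submits
 * the ring once and may later pause and resume it; dmaengine_pause() and
 * dmaengine_resume() reach omap_dma_pause()/omap_dma_resume() through
 * omap_dma_control().
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * 4096, 4096,
 *					 DMA_MEM_TO_DEV);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 *
 *	dmaengine_pause(chan);
 *	dmaengine_resume(chan);
 */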
367static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
368 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
369 size_t period_len, enum dma_transfer_direction dir, void *context)
370{
371 struct omap_chan *c = to_omap_dma_chan(chan);
372 enum dma_slave_buswidth dev_width;
373 struct omap_desc *d;
374 dma_addr_t dev_addr;
375 unsigned es, sync_type;
376 u32 burst;
377
378 if (dir == DMA_DEV_TO_MEM) {
379 dev_addr = c->cfg.src_addr;
380 dev_width = c->cfg.src_addr_width;
381 burst = c->cfg.src_maxburst;
382 sync_type = OMAP_DMA_SRC_SYNC;
383 } else if (dir == DMA_MEM_TO_DEV) {
384 dev_addr = c->cfg.dst_addr;
385 dev_width = c->cfg.dst_addr_width;
386 burst = c->cfg.dst_maxburst;
387 sync_type = OMAP_DMA_DST_SYNC;
388 } else {
389 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
390 return NULL;
391 }
392
393 /* Bus width translates to the element size (ES) */
394 switch (dev_width) {
395 case DMA_SLAVE_BUSWIDTH_1_BYTE:
396 es = OMAP_DMA_DATA_TYPE_S8;
397 break;
398 case DMA_SLAVE_BUSWIDTH_2_BYTES:
399 es = OMAP_DMA_DATA_TYPE_S16;
400 break;
401 case DMA_SLAVE_BUSWIDTH_4_BYTES:
402 es = OMAP_DMA_DATA_TYPE_S32;
403 break;
404 default: /* not reached */
405 return NULL;
406 }
407
408 /* Now allocate and setup the descriptor. */
409 d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
410 if (!d)
411 return NULL;
412
413 d->dir = dir;
414 d->dev_addr = dev_addr;
415 d->fi = burst;
416 d->es = es;
417 if (burst)
418 d->sync_mode = OMAP_DMA_SYNC_PACKET;
419 else
420 d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
421 d->sync_type = sync_type;
422 d->periph_port = OMAP_DMA_PORT_MPUI;
423 d->sg[0].addr = buf_addr;
424 d->sg[0].en = period_len / es_bytes[es];
425 d->sg[0].fn = buf_len / period_len;
426 d->sglen = 1;
427
428 if (!c->cyclic) {
429 c->cyclic = true;
430 omap_dma_link_lch(c->dma_ch, c->dma_ch);
431 omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
432 omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
433 }
434
435 if (!cpu_class_is_omap1()) {
436 omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
437 omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
438 }
439
440 return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
441}
442
443static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
444{
445 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
446 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
447 return -EINVAL;
448
449 memcpy(&c->cfg, cfg, sizeof(c->cfg));
450
451 return 0;
452}
453
454static int omap_dma_terminate_all(struct omap_chan *c)
455{
456 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
457 unsigned long flags;
458 LIST_HEAD(head);
459
460 spin_lock_irqsave(&c->vc.lock, flags);
461
462 /* Prevent this channel being scheduled */
463 spin_lock(&d->lock);
464 list_del_init(&c->node);
465 spin_unlock(&d->lock);
466
467 /*
468 * Stop DMA activity: we assume the callback will not be called
469 * after omap_stop_dma() returns (even if it does, it will see
470 * c->desc is NULL and exit.)
471 */
472 if (c->desc) {
473 c->desc = NULL;
474 /* Avoid stopping the DMA twice */
475 if (!c->paused)
476 omap_stop_dma(c->dma_ch);
477 }
478
479 if (c->cyclic) {
480 c->cyclic = false;
481 c->paused = false;
482 omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
483 }
484
485 vchan_get_all_descriptors(&c->vc, &head);
486 spin_unlock_irqrestore(&c->vc.lock, flags);
487 vchan_dma_desc_free_list(&c->vc, &head);
488
489 return 0;
490}
491
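/*
 * Pause/resume is limited to cyclic transfers: a self-linked cyclic
 * channel can be stopped with omap_stop_dma() and started again without
 * corrupting the stream, whereas resuming a partially completed sg
 * transfer this way is not handled here, so both handlers return -EINVAL
 * for non-cyclic channels. The paused flag keeps the stop/start calls
 * balanced and lets omap_dma_terminate_all() skip stopping a channel that
 * is already stopped. (Rationale paraphrased from the code below; it is
 * not spelled out in this file.)
 */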
492static int omap_dma_pause(struct omap_chan *c)
493{
494 /* Pause/Resume only allowed with cyclic mode */
495 if (!c->cyclic)
496 return -EINVAL;
497
498 if (!c->paused) {
499 omap_stop_dma(c->dma_ch);
500 c->paused = true;
501 }
502
503 return 0;
504}
505
506static int omap_dma_resume(struct omap_chan *c)
507{
508 /* Pause/Resume only allowed with cyclic mode */
509 if (!c->cyclic)
510 return -EINVAL;
511
512 if (c->paused) {
513 omap_start_dma(c->dma_ch);
514 c->paused = false;
515 }
516
517 return 0;
518}
519
520static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
521 unsigned long arg)
522{
523 struct omap_chan *c = to_omap_dma_chan(chan);
524 int ret;
525
526 switch (cmd) {
527 case DMA_SLAVE_CONFIG:
528 ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
529 break;
530
531 case DMA_TERMINATE_ALL:
532 ret = omap_dma_terminate_all(c);
533 break;
534
535 case DMA_PAUSE:
536 ret = omap_dma_pause(c);
537 break;
538
539 case DMA_RESUME:
540 ret = omap_dma_resume(c);
541 break;
542
543 default:
544 ret = -ENXIO;
545 break;
546 }
547
548 return ret;
549}
550
551static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
552{
553 struct omap_chan *c;
554
555 c = kzalloc(sizeof(*c), GFP_KERNEL);
556 if (!c)
557 return -ENOMEM;
558
559 c->dma_sig = dma_sig;
560 c->vc.desc_free = omap_dma_desc_free;
561 vchan_init(&c->vc, &od->ddev);
562 INIT_LIST_HEAD(&c->node);
563
564 od->ddev.chancnt++;
565
566 return 0;
567}
568
569static void omap_dma_free(struct omap_dmadev *od)
570{
571 tasklet_kill(&od->task);
572 while (!list_empty(&od->ddev.channels)) {
573 struct omap_chan *c = list_first_entry(&od->ddev.channels,
574 struct omap_chan, vc.chan.device_node);
575
576 list_del(&c->vc.chan.device_node);
577 tasklet_kill(&c->vc.task);
578 kfree(c);
579 }
580 kfree(od);
581}
582
583static int omap_dma_probe(struct platform_device *pdev)
584{
585 struct omap_dmadev *od;
586 int rc, i;
587
588 od = kzalloc(sizeof(*od), GFP_KERNEL);
589 if (!od)
590 return -ENOMEM;
591
592 dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
593 dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
594 od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
595 od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
596 od->ddev.device_tx_status = omap_dma_tx_status;
597 od->ddev.device_issue_pending = omap_dma_issue_pending;
598 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
599 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
600 od->ddev.device_control = omap_dma_control;
601 od->ddev.dev = &pdev->dev;
602 INIT_LIST_HEAD(&od->ddev.channels);
603 INIT_LIST_HEAD(&od->pending);
604 spin_lock_init(&od->lock);
605
606 tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
607
608 for (i = 0; i < 127; i++) {
609 rc = omap_dma_chan_init(od, i);
610 if (rc) {
611 omap_dma_free(od);
612 return rc;
613 }
614 }
615
616 rc = dma_async_device_register(&od->ddev);
617 if (rc) {
618 pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
619 rc);
620 omap_dma_free(od);
621 } else {
622 platform_set_drvdata(pdev, od);
623 }
624
625 dev_info(&pdev->dev, "OMAP DMA engine driver\n");
626
627 return rc;
628}
629
630static int omap_dma_remove(struct platform_device *pdev)
631{
632 struct omap_dmadev *od = platform_get_drvdata(pdev);
633
634 dma_async_device_unregister(&od->ddev);
635 omap_dma_free(od);
636
637 return 0;
638}
639
640static struct platform_driver omap_dma_driver = {
641 .probe = omap_dma_probe,
642 .remove = omap_dma_remove,
643 .driver = {
644 .name = "omap-dma-engine",
645 .owner = THIS_MODULE,
646 },
647};
648
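/*
 * Client-side sketch for channel allocation; not part of the driver, and
 * "MY_DMA_REQ" stands for a platform-specific request line number:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	unsigned sig = MY_DMA_REQ;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */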
649bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
650{
651 if (chan->device->dev->driver == &omap_dma_driver.driver) {
652 struct omap_chan *c = to_omap_dma_chan(chan);
653 unsigned req = *(unsigned *)param;
654
655 return req == c->dma_sig;
656 }
657 return false;
658}
659EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
660
661static struct platform_device *pdev;
662
663static const struct platform_device_info omap_dma_dev_info = {
664 .name = "omap-dma-engine",
665 .id = -1,
666 .dma_mask = DMA_BIT_MASK(32),
667};
668
669static int omap_dma_init(void)
670{
671 int rc = platform_driver_register(&omap_dma_driver);
672
673 if (rc == 0) {
674 pdev = platform_device_register_full(&omap_dma_dev_info);
675 if (IS_ERR(pdev)) {
676 platform_driver_unregister(&omap_dma_driver);
677 rc = PTR_ERR(pdev);
678 }
679 }
680 return rc;
681}
682subsys_initcall(omap_dma_init);
683
684static void __exit omap_dma_exit(void)
685{
686 platform_device_unregister(pdev);
687 platform_driver_unregister(&omap_dma_driver);
688}
689module_exit(omap_dma_exit);
690
691MODULE_AUTHOR("Russell King");
692MODULE_LICENSE("GPL");