/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10 | #include <linux/device.h> | |
11 | #include <linux/dmaengine.h> | |
12 | #include <linux/module.h> | |
13 | #include <linux/spinlock.h> | |
14 | ||
15 | #include "virt-dma.h" | |
16 | ||
17 | static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) | |
18 | { | |
19 | return container_of(tx, struct virt_dma_desc, tx); | |
20 | } | |
21 | ||
22 | dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) | |
23 | { | |
24 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); | |
25 | struct virt_dma_desc *vd = to_virt_desc(tx); | |
26 | unsigned long flags; | |
27 | dma_cookie_t cookie; | |
28 | ||
29 | spin_lock_irqsave(&vc->lock, flags); | |
30 | cookie = dma_cookie_assign(tx); | |
31 | ||
13bb26ae | 32 | list_move_tail(&vd->node, &vc->desc_submitted); |
50437bff RK |
33 | spin_unlock_irqrestore(&vc->lock, flags); |
34 | ||
35 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", | |
36 | vc, vd, cookie); | |
37 | ||
38 | return cookie; | |
39 | } | |
40 | EXPORT_SYMBOL_GPL(vchan_tx_submit); | |
41 | ||
13bb26ae RJ |
42 | /** |
43 | * vchan_tx_desc_free - free a reusable descriptor | |
44 | * @tx: the transfer | |
45 | * | |
46 | * This function frees a previously allocated reusable descriptor. The only | |
47 | * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the | |
48 | * transfer. | |
49 | * | |
50 | * Returns 0 upon success | |
51 | */ | |
52 | int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx) | |
53 | { | |
54 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); | |
55 | struct virt_dma_desc *vd = to_virt_desc(tx); | |
56 | unsigned long flags; | |
57 | ||
58 | spin_lock_irqsave(&vc->lock, flags); | |
59 | list_del(&vd->node); | |
60 | spin_unlock_irqrestore(&vc->lock, flags); | |
61 | ||
62 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n", | |
63 | vc, vd, vd->tx.cookie); | |
64 | vc->desc_free(vd); | |
65 | return 0; | |
66 | } | |
67 | EXPORT_SYMBOL_GPL(vchan_tx_desc_free); | |
68 | ||
fe045874 RK |
69 | struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, |
70 | dma_cookie_t cookie) | |
71 | { | |
72 | struct virt_dma_desc *vd; | |
73 | ||
74 | list_for_each_entry(vd, &vc->desc_issued, node) | |
75 | if (vd->tx.cookie == cookie) | |
76 | return vd; | |
77 | ||
78 | return NULL; | |
79 | } | |
80 | EXPORT_SYMBOL_GPL(vchan_find_desc); | |
81 | ||
50437bff RK |
82 | /* |
83 | * This tasklet handles the completion of a DMA descriptor by | |
84 | * calling its callback and freeing it. | |
85 | */ | |
86 | static void vchan_complete(unsigned long arg) | |
87 | { | |
88 | struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; | |
571fa740 RK |
89 | struct virt_dma_desc *vd; |
90 | dma_async_tx_callback cb = NULL; | |
91 | void *cb_data = NULL; | |
50437bff RK |
92 | LIST_HEAD(head); |
93 | ||
94 | spin_lock_irq(&vc->lock); | |
95 | list_splice_tail_init(&vc->desc_completed, &head); | |
571fa740 RK |
96 | vd = vc->cyclic; |
97 | if (vd) { | |
98 | vc->cyclic = NULL; | |
99 | cb = vd->tx.callback; | |
100 | cb_data = vd->tx.callback_param; | |
101 | } | |
50437bff RK |
102 | spin_unlock_irq(&vc->lock); |
103 | ||
571fa740 RK |
104 | if (cb) |
105 | cb(cb_data); | |
106 | ||
50437bff | 107 | while (!list_empty(&head)) { |
571fa740 RK |
108 | vd = list_first_entry(&head, struct virt_dma_desc, node); |
109 | cb = vd->tx.callback; | |
110 | cb_data = vd->tx.callback_param; | |
50437bff RK |
111 | |
112 | list_del(&vd->node); | |
13bb26ae RJ |
113 | if (dmaengine_desc_test_reuse(&vd->tx)) |
114 | list_add(&vd->node, &vc->desc_allocated); | |
115 | else | |
116 | vc->desc_free(vd); | |
50437bff RK |
117 | |
118 | if (cb) | |
119 | cb(cb_data); | |
120 | } | |
121 | } | |
122 | ||
123 | void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) | |
124 | { | |
125 | while (!list_empty(head)) { | |
126 | struct virt_dma_desc *vd = list_first_entry(head, | |
127 | struct virt_dma_desc, node); | |
13bb26ae RJ |
128 | if (dmaengine_desc_test_reuse(&vd->tx)) { |
129 | list_move_tail(&vd->node, &vc->desc_allocated); | |
130 | } else { | |
131 | dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); | |
132 | list_del(&vd->node); | |
133 | vc->desc_free(vd); | |
134 | } | |
50437bff RK |
135 | } |
136 | } | |
137 | EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); | |
138 | ||
139 | void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) | |
140 | { | |
141 | dma_cookie_init(&vc->chan); | |
142 | ||
143 | spin_lock_init(&vc->lock); | |
13bb26ae | 144 | INIT_LIST_HEAD(&vc->desc_allocated); |
50437bff RK |
145 | INIT_LIST_HEAD(&vc->desc_submitted); |
146 | INIT_LIST_HEAD(&vc->desc_issued); | |
147 | INIT_LIST_HEAD(&vc->desc_completed); | |
148 | ||
149 | tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); | |
150 | ||
151 | vc->chan.device = dmadev; | |
152 | list_add_tail(&vc->chan.device_node, &dmadev->channels); | |
153 | } | |
154 | EXPORT_SYMBOL_GPL(vchan_init); | |
155 | ||
/* Module metadata; this file provides only library helpers for drivers. */
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");