/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, which is
 * protected by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it needs an exclusive channel it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
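
/*
 * A minimal usage sketch of the public allocator described above
 * (illustrative client code only, not compiled as part of this file;
 * error handling elided):
 *
 *	dmaengine_get();			// take refs on registered channels
 *	chan = dma_find_channel(DMA_MEMCPY);	// lockless per-cpu lookup
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	...
 *	dmaengine_put();			// drop refs, allow driver unload
 *
 * All of the interfaces named here are defined and exported below.
 */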

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
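
/*
 * Illustrative sketch (hypothetical caller, not part of this file): since
 * the lookup above is a plain per-cpu read, fast paths may use it
 * opportunistically and fall back to the cpu when it returns NULL:
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *
 *	if (!chan)
 *		memcpy(dst, src, len);	// no public channel on this cpu
 *
 * A NULL return simply means no public channel is currently assigned to
 * this cpu for the requested operation type.
 */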

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_get_slave_channel - try to get a specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int err;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = private_candidate(&mask, device, NULL, NULL);
	if (chan) {
		err = dma_chan_get(chan);
		if (err) {
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
			chan = NULL;
		}
	}

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it. We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
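
/*
 * Illustrative sketch of the private-channel path (my_filter and
 * my_match_data are hypothetical client code, not part of this file):
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev->parent == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_match_data);
 *	...
 *	dma_release_channel(chan);
 *
 * dma_request_channel() is the convenience macro in <linux/dmaengine.h>
 * that passes &mask to __dma_request_channel() above.
 */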

/**
 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
						  const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
	if (IS_ERR(ch))
		return NULL;
	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
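
/*
 * Illustrative sketch (hypothetical driver probe code, not part of this
 * file): a slave-DMA client usually requests its channel by name:
 *
 *	chan = dma_request_slave_channel_reason(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER from DT
 *
 * The name "rx" must match the firmware description (the dma-names
 * property in the device tree, or its ACPI equivalent).
 */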

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
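
/*
 * Illustrative sketch (hypothetical provider code, not part of this
 * file): a driver fills in its capabilities and the matching callbacks
 * before registering; the BUG_ON()s above spell out which callbacks are
 * mandatory for each advertised capability:
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->device_prep_dma_memcpy	= foo_prep_dma_memcpy;
 *	dd->device_alloc_chan_resources	= foo_alloc_chan_resources;
 *	dd->device_free_chan_resources	= foo_free_chan_resources;
 *	dd->device_tx_status		= foo_tx_status;
 *	dd->device_issue_pending	= foo_issue_pending;
 *	dd->dev = &pdev->dev;
 *	// ... add each channel's device_node to dd->channels ...
 *	rc = dma_async_device_register(dd);
 *
 * The foo_* names are placeholders for the driver's own implementations.
 */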

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		if (p->pool)
			mempool_destroy(p->pool);
		p->pool = NULL;
		if (p->cache)
			kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	unsigned long flags;

	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->to_cnt = 1;
	unmap->from_cnt = 1;
	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
				      DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
				      DMA_FROM_DEVICE);
	unmap->len = len;
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, flags);

	if (!tx) {
		dmaengine_unmap_put(unmap);
		return -ENOMEM;
	}

	dma_set_unmap(tx, unmap);
	cookie = tx->tx_submit(tx);
	dmaengine_unmap_put(unmap);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
					 (unsigned long) dest & ~PAGE_MASK,
					 virt_to_page(src),
					 (unsigned long) src & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
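
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * offloading one copy and waiting synchronously for it to finish:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (cookie < 0)
 *		memcpy(dst, src, len);	// prep failed, fall back to the cpu
 *	else
 *		status = dma_sync_wait(chan, cookie);	// DMA_COMPLETE on success
 *
 * dma_sync_wait() is defined earlier in this file; it issues pending work
 * and polls the channel until the cookie completes or times out.
 */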

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	return dma_async_memcpy_pg_to_pg(chan, page, offset,
					 virt_to_page(kdata),
					 (unsigned long) kdata & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
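
/*
 * Illustrative sketch (hypothetical driver cleanup code, not part of
 * this file): a provider's completion tasklet typically marks the
 * cookie complete and then starts any dependent operations:
 *
 *	dma_cookie_complete(&desc->txd);	// helper in drivers/dma/dmaengine.h
 *	dma_run_dependencies(&desc->txd);
 *
 * The desc/txd naming is driver-specific and shown only for shape.
 */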

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);