/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected
 * by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
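
/*
 * Illustrative sketch of the two client styles described above; this is
 * commentary, not part of the build, and error handling is elided. An
 * opportunistic client shares the public channels:
 *
 *      dmaengine_get();
 *      chan = dma_find_channel(DMA_MEMCPY);
 *      if (chan)
 *              cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *      ...
 *      dmaengine_put();
 *
 * A client that needs exclusive access requests (and later releases) a
 * private channel; "my_filter" and "my_param" are hypothetical:
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_MEMCPY, mask);
 *      chan = dma_request_channel(mask, my_filter, my_param);
 *      ...
 *      dma_release_channel(chan);
 */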

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
static struct idr dma_idr;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan)
                err = sprintf(buf, "%d\n", chan->client_count);
        else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, chan_dev->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
}

static struct class dma_devclass = {
        .name           = "dma",
        .dev_attrs      = dma_attrs,
        .dev_release    = chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                   DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);

        if (chan->client_count) {
                __module_get(owner);
                err = 0;
        } else if (try_module_get(owner))
                err = 0;

        if (err == 0)
                chan->client_count++;

        /* allocate upon first client reference */
        if (chan->client_count == 1 && err == 0) {
                int desc_cnt = chan->device->device_alloc_chan_resources(chan);

                if (desc_cnt < 0) {
                        err = desc_cnt;
                        chan->client_count = 0;
                        module_put(owner);
                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                        balance_ref_count(chan);
        }

        return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        if (!chan->client_count)
                return; /* this channel failed alloc_chan_resources */
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
        if (chan->client_count == 0)
                chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("dmaengine: initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        if (channel_table[cap])
                                free_percpu(channel_table[cap]);
        }

        return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        struct dma_chan *chan;
        int cpu;

        cpu = get_cpu();
        chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
        put_cpu();

        return chan;
}
EXPORT_SYMBOL(dma_find_channel);
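
/*
 * Example (commentary only): dma_find_channel() is only meaningful while
 * the caller holds a dmaengine_get() reference, and a NULL return simply
 * means no public channel carries this operation, so callers are expected
 * to fall back to a cpu copy:
 *
 *      struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *
 *      if (chan)
 *              cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *      else
 *              memcpy(dest, src, len);
 */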

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied. Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min)
                                min = chan;
                        else if (chan->table_count < min->table_count)
                                min = chan;

                        if (n-- == 0) {
                                ret = chan;
                                break; /* done */
                        }
                }
                if (ret)
                        break; /* done */
        }

        if (!ret)
                ret = min;

        if (ret)
                ret->table_count++;

        return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;
        int n;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        if (num_possible_cpus() > 1)
                                chan = nth_chan(cap, n++);
                        else
                                chan = nth_chan(cap, -1);

                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
                        pr_debug("%s: %s filter said false\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                return chan;
        }

        return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback used to vet candidate channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        int err;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device, fn, fn_param);
                if (chan) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it. We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n", __func__,
                                         dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dma_chan_name(chan), err);
                        else
                                break;
                        chan->private = NULL;
                        chan = NULL;
                }
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
                 chan ? dma_chan_name(chan) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
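
/*
 * Caller sketch (commentary, with hypothetical names): a slave driver
 * asking for an exclusive channel on a specific device. The filter
 * returns true to claim a candidate channel:
 *
 *      static bool my_filter(struct dma_chan *chan, void *param)
 *      {
 *              return chan->device->dev == param;
 *      }
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, my_filter, my_dev);
 *      if (!chan)
 *              return -ENODEV;
 */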

void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        chan->private = NULL;
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dma_chan_name(chan), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
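
/*
 * Pairing sketch (commentary): a subsystem that opportunistically uses
 * public channels typically brackets its lifetime with these calls so the
 * channel_table stays populated while it may issue operations. "my_subsys"
 * is hypothetical:
 *
 *      static int __init my_subsys_init(void)
 *      {
 *              dmaengine_get();
 *              return 0;
 *      }
 *
 *      static void __exit my_subsys_exit(void)
 *      {
 *              dmaengine_put();
 *      }
 */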

/**
 * dma_async_device_register - register a DMA device with the subsystem
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        int chancnt = 0, rc;
        struct dma_chan *chan;
        atomic_t *idr_ref;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
               !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
               !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
               !device->device_prep_dma_zero_sum);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
               !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
               !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
               !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
               !device->device_terminate_all);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_is_tx_complete);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
        if (!idr_ref)
                return -ENOMEM;
        atomic_set(idr_ref, 0);
idr_retry:
        if (!idr_pre_get(&dma_idr, GFP_KERNEL))
                return -ENOMEM;
        mutex_lock(&dma_list_mutex);
        rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
        mutex_unlock(&dma_list_mutex);
        if (rc == -EAGAIN)
                goto idr_retry;
        else if (rc != 0)
                return rc;

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        continue;
                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
                if (chan->dev == NULL) {
                        free_percpu(chan->local);
                        continue;
                }

                chan->chan_id = chancnt++;
                chan->dev->device.class = &dma_devclass;
                chan->dev->device.parent = device->dev;
                chan->dev->chan = chan;
                chan->dev->idr_ref = idr_ref;
                chan->dev->dev_id = device->dev_id;
                atomic_inc(idr_ref);
                dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }
                chan->client_count = 0;
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
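
/*
 * Provider sketch (commentary, hypothetical "my_*" names): the minimum a
 * memcpy-capable driver fills in before registering. The BUG_ON()s above
 * enforce exactly this pairing of capability bits and device routines:
 *
 *      dma_cap_set(DMA_MEMCPY, my_dma->cap_mask);
 *      my_dma->device_alloc_chan_resources = my_alloc_chan_resources;
 *      my_dma->device_free_chan_resources = my_free_chan_resources;
 *      my_dma->device_prep_dma_memcpy = my_prep_dma_memcpy;
 *      my_dma->device_is_tx_complete = my_is_tx_complete;
 *      my_dma->device_issue_pending = my_issue_pending;
 *      my_dma->dev = &pdev->dev;
 *      (add each channel to my_dma->channels, then:)
 *      err = dma_async_device_register(my_dma);
 */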

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it from being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                            void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
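
/*
 * Usage sketch (commentary): submit one copy and wait for it. A negative
 * cookie means submission failed and the caller should fall back to
 * memcpy(); dma_sync_wait() spins until the engine reports completion:
 *
 *      cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *      if (dma_submit_error(cookie))
 *              memcpy(dest, src, len);
 *      else if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *              pr_err("offloaded copy failed\n");
 */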

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                           unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
                          unsigned int dest_off, struct page *src_pg,
                          unsigned int src_off, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                  struct dma_chan *chan)
{
        tx->chan = chan;
        spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc., which ensures that tx is "in-flight"
 * (prepped and submitted). Walking the parent chain is only meant to cover
 * for DMA drivers that do not implement the DMA_INTERRUPT capability and may
 * race with the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        enum dma_status status;
        struct dma_async_tx_descriptor *iter;
        struct dma_async_tx_descriptor *parent;

        if (!tx)
                return DMA_SUCCESS;

        WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
                  " %s\n", __func__, dma_chan_name(tx->chan));

        /* poll through the dependency chain, return when tx is complete */
        do {
                iter = tx;

                /* find the root of the unsubmitted dependency chain */
                do {
                        parent = iter->parent;
                        if (!parent)
                                break;
                        else
                                iter = parent;
                } while (parent);

                /* there is a small window for ->parent == NULL and
                 * ->cookie == -EBUSY
                 */
                while (iter->cookie == -EBUSY)
                        cpu_relax();

                status = dma_sync_wait(iter->chan, iter->cookie);
        } while (status == DMA_IN_PROGRESS || (iter != tx));

        return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *      (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = tx->next;
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        /* we'll submit tx->next now, so clear the link */
        tx->next = NULL;
        chan = dep->chan;

        /* keep submitting up until a channel switch is detected; in that
         * case we will be called again as a result of processing the
         * interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                spin_lock_bh(&dep->lock);
                dep->parent = NULL;
                dep_next = dep->next;
                if (dep_next && dep_next->chan == chan)
                        dep->next = NULL; /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                spin_unlock_bh(&dep->lock);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
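
/*
 * Call-site sketch (commentary): a driver's completion tasklet runs the
 * client callback and then starts any dependent operations. "desc" is a
 * hypothetical driver-private descriptor embedding a
 * dma_async_tx_descriptor "txd":
 *
 *      if (desc->txd.callback)
 *              desc->txd.callback(desc->txd.callback_param);
 *      dma_run_dependencies(&desc->txd);
 */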

static int __init dma_bus_init(void)
{
        idr_init(&dma_idr);
        mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);