/* drivers/dma/dmaengine.c */
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, which is
 * protected by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked; it is set up by the
 * driver and never modified once the device is registered.
 *
 * See Documentation/dmaengine.txt for more details.
 */
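
/*
 * Example (illustrative sketch, not part of the original file): the opt-in
 * model for the general-purpose allocator described above.  The names
 * my_client_init()/my_client_exit() are hypothetical client code; only the
 * dmaengine_* calls are real API.
 *
 *	static int __init my_client_init(void)
 *	{
 *		dmaengine_get();	// pin public channels and their drivers
 *		return 0;
 *	}
 *
 *	static void __exit my_client_exit(void)
 *	{
 *		dmaengine_put();	// balance the get; allow driver removal
 *	}
 *
 * dma_find_channel() is only meaningful between these two calls.
 */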

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan)
                err = sprintf(buf, "%d\n", chan->client_count);
        else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, chan_dev->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
}

static struct class dma_devclass = {
        .name = "dma",
        .dev_attrs = dma_attrs,
        .dev_release = chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                   DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);

        if (chan->client_count) {
                __module_get(owner);
                err = 0;
        } else if (try_module_get(owner))
                err = 0;

        if (err == 0)
                chan->client_count++;

        /* allocate upon first client reference */
        if (chan->client_count == 1 && err == 0) {
                int desc_cnt = chan->device->device_alloc_chan_resources(chan);

                if (desc_cnt < 0) {
                        err = desc_cnt;
                        chan->client_count = 0;
                        module_put(owner);
                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                        balance_ref_count(chan);
        }

        return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        if (!chan->client_count)
                return; /* this channel failed alloc_chan_resources */
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
        if (chan->client_count == 0)
                chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        pr_err("%s: timeout!\n", __func__);
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);
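
/*
 * Example (sketch): dma_sync_wait() issues the channel's pending work
 * itself, so a caller needs only the channel and the cookie it got back
 * at submit time; 'chan' and 'cookie' below are assumed to exist.
 *
 *	if (dma_sync_wait(chan, cookie) == DMA_ERROR)
 *		pr_err("transfer failed or timed out (5s)\n");
 */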

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        if (channel_table[cap])
                                free_percpu(channel_table[cap]);
        }

        return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
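
/*
 * Example (sketch): per-operation lookup.  Only valid between
 * dmaengine_get() and dmaengine_put(), and the returned channel may
 * differ from call to call, so look it up per operation rather than
 * caching it; 'dest', 'src' and 'len' below are assumed to exist.
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *	dma_cookie_t cookie;
 *
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 */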

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
        struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
        if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
                return NULL;

        return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied. Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min)
                                min = chan;
                        else if (chan->table_count < min->table_count)
                                min = chan;

                        if (n-- == 0) {
                                ret = chan;
                                break; /* done */
                        }
                }
                if (ret)
                        break; /* done */
        }

        if (!ret)
                ret = min;

        if (ret)
                ret->table_count++;

        return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;
        int n;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        if (num_possible_cpus() > 1)
                                chan = nth_chan(cap, n++);
                        else
                                chan = nth_chan(cap, -1);

                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
                        pr_debug("%s: %s filter said false\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                return chan;
        }

        return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to filter the available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        int err;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device, fn, fn_param);
                if (chan) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it.  We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        device->privatecnt++;
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n",
                                         __func__, dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_debug("%s: failed to get %s: (%d)\n",
                                         __func__, dma_chan_name(chan), err);
                        else
                                break;
                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
                        chan = NULL;
                }
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n",
                 __func__,
                 chan ? "success" : "fail",
                 chan ? dma_chan_name(chan) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
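
/*
 * Example (sketch): exclusive allocation through the dma_request_channel()
 * wrapper around __dma_request_channel().  The filter function and the
 * 'my_dev' token are hypothetical; real filters typically match on
 * driver-specific data such as a request line.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;	// hypothetical match
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (!chan)
 *		return -ENODEV;
 *	// ... use the channel exclusively ...
 *	dma_release_channel(chan);	// drop the exclusive reference
 */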

void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        /* drop PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_err("%s: failed to get %s: (%d)\n",
                                       __func__, dma_chan_name(chan), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
        /* A device that satisfies this test has channels that will never cause
         * an async_tx channel switch event as all possible operation types can
         * be handled.
         */
        #ifdef CONFIG_ASYNC_TX_DMA
        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
        if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
        if (!dma_has_cap(DMA_XOR, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
        if (!dma_has_cap(DMA_PQ, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        return true;
}

static int get_dma_id(struct dma_device *device)
{
        int rc;

 idr_retry:
        if (!idr_pre_get(&dma_idr, GFP_KERNEL))
                return -ENOMEM;
        mutex_lock(&dma_list_mutex);
        rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
        mutex_unlock(&dma_list_mutex);
        if (rc == -EAGAIN)
                goto idr_retry;
        else if (rc != 0)
                return rc;

        return 0;
}

/**
 * dma_async_device_register - registers a DMA device
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        int chancnt = 0, rc;
        struct dma_chan *chan;
        atomic_t *idr_ref;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
               !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
               !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
               !device->device_prep_dma_xor_val);
        BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
               !device->device_prep_dma_pq);
        BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
               !device->device_prep_dma_pq_val);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
               !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
               !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
               !device->device_prep_dma_sg);
        BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
               !device->device_prep_dma_cyclic);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
               !device->device_control);
        BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
               !device->device_prep_interleaved_dma);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_tx_status);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        /* note: this only matters in the
         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
        if (!idr_ref)
                return -ENOMEM;
        rc = get_dma_id(device);
        if (rc != 0) {
                kfree(idr_ref);
                return rc;
        }

        atomic_set(idr_ref, 0);

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                rc = -ENOMEM;
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        goto err_out;
                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
                if (chan->dev == NULL) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                chan->chan_id = chancnt++;
                chan->dev->device.class = &dma_devclass;
                chan->dev->device.parent = device->dev;
                chan->dev->chan = chan;
                chan->dev->idr_ref = idr_ref;
                chan->dev->dev_id = device->dev_id;
                atomic_inc(idr_ref);
                dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        kfree(chan->dev);
                        atomic_dec(idr_ref);
                        goto err_out;
                }
                chan->client_count = 0;
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                device->privatecnt++;   /* Always private */
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        /* if we never registered a channel just release the idr */
        if (atomic_read(idr_ref) == 0) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, device->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(idr_ref);
                return rc;
        }

        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
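
/*
 * Example (sketch): the provider side of registration.  All identifiers
 * prefixed my_* are hypothetical driver code; the BUG_ON checks above
 * require a callback for each advertised capability plus the four
 * mandatory channel routines and a backing struct device.
 *
 *	dma_cap_zero(my_dma_dev.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, my_dma_dev.cap_mask);
 *	my_dma_dev.device_prep_dma_memcpy = my_prep_memcpy;
 *	my_dma_dev.device_alloc_chan_resources = my_alloc_chan_resources;
 *	my_dma_dev.device_free_chan_resources = my_free_chan_resources;
 *	my_dma_dev.device_tx_status = my_tx_status;
 *	my_dma_dev.device_issue_pending = my_issue_pending;
 *	my_dma_dev.dev = &pdev->dev;
 *	INIT_LIST_HEAD(&my_dma_dev.channels);
 *	// ... add each struct dma_chan to my_dma_dev.channels ...
 *
 *	err = dma_async_device_register(&my_dma_dev);
 */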

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it from being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                            void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK |
                DMA_COMPL_SRC_UNMAP_SINGLE |
                DMA_COMPL_DEST_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
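
/*
 * Example (sketch): a complete offloaded copy.  'dest', 'src' and 'len'
 * are assumed to be kernel buffers satisfying the mapping rules above;
 * dma_sync_wait() issues the pending work itself.
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *	dma_cookie_t cookie;
 *
 *	if (!chan) {
 *		memcpy(dest, src, len);		// fall back to the CPU
 *		return;
 *	}
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (dma_submit_error(cookie))
 *		memcpy(dest, src, len);
 *	else if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		pr_err("offloaded copy failed\n");
 */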

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                           unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
                          unsigned int dest_off, struct page *src_pg, unsigned int src_off,
                          size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                  struct dma_chan *chan)
{
        tx->chan = chan;
        #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        spin_lock_init(&tx->lock);
        #endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        if (!tx)
                return DMA_SUCCESS;

        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        pr_err("%s timeout waiting for descriptor submission\n",
                               __func__);
                        return DMA_ERROR;
                }
                cpu_relax();
        }
        return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
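
/*
 * Example (sketch): waiting on a descriptor rather than a cookie, the way
 * async_tx does.  'tx' is assumed to come from a prep routine such as
 * device_prep_dma_memcpy(); descriptor lifetime/flags handling is
 * driver-specific and elided here.
 *
 *	dma_cookie_t cookie = tx->tx_submit(tx);
 *
 *	if (dma_submit_error(cookie) ||
 *	    dma_wait_for_async_tx(tx) != DMA_SUCCESS)
 *		pr_err("transaction failed\n");
 */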

/**
 * dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = txd_next(tx);
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        /* we'll submit tx->next now, so clear the link */
        txd_clear_next(tx);
        chan = dep->chan;

        /* keep submitting up until a channel switch is detected
         * in that case we will be called again as a result of
         * processing the interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                txd_lock(dep);
                txd_clear_parent(dep);
                dep_next = txd_next(dep);
                if (dep_next && dep_next->chan == chan)
                        txd_clear_next(dep); /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                txd_unlock(dep);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
        return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
