dmaengine: at_hdmac: extend hardware handshaking interface identification
drivers/dma/at_hdmac.c
1 /*
2 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
3 *
4 * Copyright (C) 2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 *
12 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
13 * The only Atmel DMA Controller that is not covered by this driver is the one
14 * found on AT91SAM9263.
15 */
16
17 #include <linux/clk.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dmapool.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/platform_device.h>
24 #include <linux/slab.h>
25 #include <linux/of.h>
26 #include <linux/of_device.h>
27 #include <linux/of_dma.h>
28
29 #include "at_hdmac_regs.h"
30 #include "dmaengine.h"
31
32 /*
33 * Glossary
34 * --------
35 *
36 * at_hdmac : Name of the Atmel AHB DMA Controller
37 * at_dma_ / atdma : Atmel DMA controller entity related
38 * atc_ / atchan : Atmel DMA Channel entity related
39 */
40
41 #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
42 #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
43 |ATC_DIF(AT_DMA_MEM_IF))
44
45 /*
46 * Initial number of descriptors to allocate for each channel. This could
47 * be increased during dma usage.
48 */
49 static unsigned int init_nr_desc_per_channel = 64;
50 module_param(init_nr_desc_per_channel, uint, 0644);
51 MODULE_PARM_DESC(init_nr_desc_per_channel,
52 "initial descriptors per channel (default: 64)");
53
54
55 /* prototypes */
56 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
57
58
59 /*----------------------------------------------------------------------*/
60
61 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
62 {
63 return list_first_entry(&atchan->active_list,
64 struct at_desc, desc_node);
65 }
66
67 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
68 {
69 return list_first_entry(&atchan->queue,
70 struct at_desc, desc_node);
71 }
72
73 /**
74 * atc_alloc_descriptor - allocate and return an initialized descriptor
75 * @chan: the channel to allocate descriptors for
76 * @gfp_flags: GFP allocation flags
77 *
78 * Note: The ack-bit is positioned in the descriptor flag at creation time
79 * to make initial allocation more convenient. This bit will be cleared
80 * and control will be given to client at usage time (during
81 * preparation functions).
82 */
83 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
84 gfp_t gfp_flags)
85 {
86 struct at_desc *desc = NULL;
87 struct at_dma *atdma = to_at_dma(chan->device);
88 dma_addr_t phys;
89
90 desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
91 if (desc) {
92 memset(desc, 0, sizeof(struct at_desc));
93 INIT_LIST_HEAD(&desc->tx_list);
94 dma_async_tx_descriptor_init(&desc->txd, chan);
95 /* txd.flags will be overwritten in prep functions */
96 desc->txd.flags = DMA_CTRL_ACK;
97 desc->txd.tx_submit = atc_tx_submit;
98 desc->txd.phys = phys;
99 }
100
101 return desc;
102 }
103
104 /**
105 * atc_desc_get - get an unused descriptor from free_list
106 * @atchan: channel we want a new descriptor for
107 */
108 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
109 {
110 struct at_desc *desc, *_desc;
111 struct at_desc *ret = NULL;
112 unsigned long flags;
113 unsigned int i = 0;
114 LIST_HEAD(tmp_list);
115
116 spin_lock_irqsave(&atchan->lock, flags);
117 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
118 i++;
119 if (async_tx_test_ack(&desc->txd)) {
120 list_del(&desc->desc_node);
121 ret = desc;
122 break;
123 }
124 dev_dbg(chan2dev(&atchan->chan_common),
125 "desc %p not ACKed\n", desc);
126 }
127 spin_unlock_irqrestore(&atchan->lock, flags);
128 dev_vdbg(chan2dev(&atchan->chan_common),
129 "scanned %u descriptors on freelist\n", i);
130
131 /* no more descriptors available in the initial pool: create one more */
132 if (!ret) {
133 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
134 if (ret) {
135 spin_lock_irqsave(&atchan->lock, flags);
136 atchan->descs_allocated++;
137 spin_unlock_irqrestore(&atchan->lock, flags);
138 } else {
139 dev_err(chan2dev(&atchan->chan_common),
140 "not enough descriptors available\n");
141 }
142 }
143
144 return ret;
145 }
146
147 /**
148 * atc_desc_put - move a descriptor, including any children, to the free list
149 * @atchan: channel we work on
150 * @desc: descriptor, at the head of a chain, to move to free list
151 */
152 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
153 {
154 if (desc) {
155 struct at_desc *child;
156 unsigned long flags;
157
158 spin_lock_irqsave(&atchan->lock, flags);
159 list_for_each_entry(child, &desc->tx_list, desc_node)
160 dev_vdbg(chan2dev(&atchan->chan_common),
161 "moving child desc %p to freelist\n",
162 child);
163 list_splice_init(&desc->tx_list, &atchan->free_list);
164 dev_vdbg(chan2dev(&atchan->chan_common),
165 "moving desc %p to freelist\n", desc);
166 list_add(&desc->desc_node, &atchan->free_list);
167 spin_unlock_irqrestore(&atchan->lock, flags);
168 }
169 }
170
171 /**
172 * atc_desc_chain - build chain adding a descriptor
173 * @first: address of first descriptor of the chain
174 * @prev: address of previous descriptor of the chain
175 * @desc: descriptor to queue
176 *
177 * Called from prep_* functions
178 */
179 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
180 struct at_desc *desc)
181 {
182 if (!(*first)) {
183 *first = desc;
184 } else {
185 /* inform the HW lli about chaining */
186 (*prev)->lli.dscr = desc->txd.phys;
187 /* insert the link descriptor to the LD ring */
188 list_add_tail(&desc->desc_node,
189 &(*first)->tx_list);
190 }
191 *prev = desc;
192 }
193
194 /**
195 * atc_dostart - starts the DMA engine for real
196 * @atchan: the channel we want to start
197 * @first: first descriptor in the list we want to begin with
198 *
199 * Called with atchan->lock held and bh disabled
200 */
201 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
202 {
203 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
204
205 /* ASSERT: channel is idle */
206 if (atc_chan_is_enabled(atchan)) {
207 dev_err(chan2dev(&atchan->chan_common),
208 "BUG: Attempted to start non-idle channel\n");
209 dev_err(chan2dev(&atchan->chan_common),
210 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
211 channel_readl(atchan, SADDR),
212 channel_readl(atchan, DADDR),
213 channel_readl(atchan, CTRLA),
214 channel_readl(atchan, CTRLB),
215 channel_readl(atchan, DSCR));
216
217 /* The tasklet will hopefully advance the queue... */
218 return;
219 }
220
221 vdbg_dump_regs(atchan);
222
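/*
 * Clear the per-channel registers and point DSCR at the first hardware
 * linked-list item: once the channel is enabled in CHER, the controller
 * fetches SADDR/DADDR/CTRLA/CTRLB from the descriptor chain itself.
 */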
223 channel_writel(atchan, SADDR, 0);
224 channel_writel(atchan, DADDR, 0);
225 channel_writel(atchan, CTRLA, 0);
226 channel_writel(atchan, CTRLB, 0);
227 channel_writel(atchan, DSCR, first->txd.phys);
228 dma_writel(atdma, CHER, atchan->mask);
229
230 vdbg_dump_regs(atchan);
231 }
232
233 /**
234 * atc_chain_complete - finish work for one transaction chain
235 * @atchan: channel we work on
236 * @desc: descriptor at the head of the chain we want do complete
237 *
238 * Called with atchan->lock held and bh disabled */
239 static void
240 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
241 {
242 struct dma_async_tx_descriptor *txd = &desc->txd;
243
244 dev_vdbg(chan2dev(&atchan->chan_common),
245 "descriptor %u complete\n", txd->cookie);
246
247 /* mark the descriptor as complete for non-cyclic cases only */
248 if (!atc_chan_is_cyclic(atchan))
249 dma_cookie_complete(txd);
250
251 /* move children to free_list */
252 list_splice_init(&desc->tx_list, &atchan->free_list);
253 /* move myself to free_list */
254 list_move(&desc->desc_node, &atchan->free_list);
255
256 /* unmap dma addresses (not on slave channels) */
257 if (!atchan->chan_common.private) {
258 struct device *parent = chan2parent(&atchan->chan_common);
259 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
260 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
261 dma_unmap_single(parent,
262 desc->lli.daddr,
263 desc->len, DMA_FROM_DEVICE);
264 else
265 dma_unmap_page(parent,
266 desc->lli.daddr,
267 desc->len, DMA_FROM_DEVICE);
268 }
269 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
270 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
271 dma_unmap_single(parent,
272 desc->lli.saddr,
273 desc->len, DMA_TO_DEVICE);
274 else
275 dma_unmap_page(parent,
276 desc->lli.saddr,
277 desc->len, DMA_TO_DEVICE);
278 }
279 }
280
281 /* for cyclic transfers,
282 * there is no need to invoke the callback function while stopping */
283 if (!atc_chan_is_cyclic(atchan)) {
284 dma_async_tx_callback callback = txd->callback;
285 void *param = txd->callback_param;
286
287 /*
288 * The API requires that no submissions are done from a
289 * callback, so we don't need to drop the lock here
290 */
291 if (callback)
292 callback(param);
293 }
294
295 dma_run_dependencies(txd);
296 }
297
298 /**
299 * atc_complete_all - finish work for all transactions
300 * @atchan: channel to complete transactions for
301 *
302 * Submit queued descriptors, if any
303 *
304 * Assume channel is idle while calling this function
305 * Called with atchan->lock held and bh disabled
306 */
307 static void atc_complete_all(struct at_dma_chan *atchan)
308 {
309 struct at_desc *desc, *_desc;
310 LIST_HEAD(list);
311
312 dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
313
314 /*
315 * Submit queued descriptors ASAP, i.e. before we go through
316 * the completed ones.
317 */
318 if (!list_empty(&atchan->queue))
319 atc_dostart(atchan, atc_first_queued(atchan));
320 /* empty active_list now that it is completed */
321 list_splice_init(&atchan->active_list, &list);
322 /* empty queue list by moving descriptors (if any) to active_list */
323 list_splice_init(&atchan->queue, &atchan->active_list);
324
325 list_for_each_entry_safe(desc, _desc, &list, desc_node)
326 atc_chain_complete(atchan, desc);
327 }
328
329 /**
330 * atc_cleanup_descriptors - clean up finished descriptors in active_list
331 * @atchan: channel to be cleaned up
332 *
333 * Called with atchan->lock held and bh disabled
334 */
335 static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
336 {
337 struct at_desc *desc, *_desc;
338 struct at_desc *child;
339
340 dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
341
342 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
343 if (!(desc->lli.ctrla & ATC_DONE))
344 /* This one is currently in progress */
345 return;
346
347 list_for_each_entry(child, &desc->tx_list, desc_node)
348 if (!(child->lli.ctrla & ATC_DONE))
349 /* Currently in progress */
350 return;
351
352 /*
353 * No descriptors so far seem to be in progress, i.e.
354 * this chain must be done.
355 */
356 atc_chain_complete(atchan, desc);
357 }
358 }
359
360 /**
361 * atc_advance_work - at the end of a transaction, move forward
362 * @atchan: channel where the transaction ended
363 *
364 * Called with atchan->lock held and bh disabled
365 */
366 static void atc_advance_work(struct at_dma_chan *atchan)
367 {
368 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
369
370 if (atc_chan_is_enabled(atchan))
371 return;
372
373 if (list_empty(&atchan->active_list) ||
374 list_is_singular(&atchan->active_list)) {
375 atc_complete_all(atchan);
376 } else {
377 atc_chain_complete(atchan, atc_first_active(atchan));
378 /* advance work */
379 atc_dostart(atchan, atc_first_active(atchan));
380 }
381 }
382
383
384 /**
385 * atc_handle_error - handle errors reported by DMA controller
386 * @atchan: channel where error occurs
387 *
388 * Called with atchan->lock held and bh disabled
389 */
390 static void atc_handle_error(struct at_dma_chan *atchan)
391 {
392 struct at_desc *bad_desc;
393 struct at_desc *child;
394
395 /*
396 * The descriptor currently at the head of the active list is
397 * broken. Since we don't have any way to report errors, we'll
398 * just have to scream loudly and try to carry on.
399 */
400 bad_desc = atc_first_active(atchan);
401 list_del_init(&bad_desc->desc_node);
402
403 /* As we are stopped, take the opportunity to push queued descriptors
404 * onto the active_list */
405 list_splice_init(&atchan->queue, atchan->active_list.prev);
406
407 /* Try to restart the controller */
408 if (!list_empty(&atchan->active_list))
409 atc_dostart(atchan, atc_first_active(atchan));
410
411 /*
412 * KERN_CRIT may seem harsh, but since this only happens
413 * when someone submits a bad physical address in a
414 * descriptor, we should consider ourselves lucky that the
415 * controller flagged an error instead of scribbling over
416 * random memory locations.
417 */
418 dev_crit(chan2dev(&atchan->chan_common),
419 "Bad descriptor submitted for DMA!\n");
420 dev_crit(chan2dev(&atchan->chan_common),
421 " cookie: %d\n", bad_desc->txd.cookie);
422 atc_dump_lli(atchan, &bad_desc->lli);
423 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
424 atc_dump_lli(atchan, &child->lli);
425
426 /* Pretend the descriptor completed successfully */
427 atc_chain_complete(atchan, bad_desc);
428 }
429
430 /**
431 * atc_handle_cyclic - at the end of a period, run callback function
432 * @atchan: channel used for cyclic operations
433 *
434 * Called with atchan->lock held and bh disabled
435 */
436 static void atc_handle_cyclic(struct at_dma_chan *atchan)
437 {
438 struct at_desc *first = atc_first_active(atchan);
439 struct dma_async_tx_descriptor *txd = &first->txd;
440 dma_async_tx_callback callback = txd->callback;
441 void *param = txd->callback_param;
442
443 dev_vdbg(chan2dev(&atchan->chan_common),
444 "new cyclic period llp 0x%08x\n",
445 channel_readl(atchan, DSCR));
446
447 if (callback)
448 callback(param);
449 }
450
451 /*-- IRQ & Tasklet ---------------------------------------------------*/
452
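/*
 * Per-channel bottom half: depending on the channel state it either handles
 * an AHB error, signals the end of a cyclic period, or completes finished
 * descriptors and starts the next queued transfer.
 */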
453 static void atc_tasklet(unsigned long data)
454 {
455 struct at_dma_chan *atchan = (struct at_dma_chan *)data;
456 unsigned long flags;
457
458 spin_lock_irqsave(&atchan->lock, flags);
459 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
460 atc_handle_error(atchan);
461 else if (atc_chan_is_cyclic(atchan))
462 atc_handle_cyclic(atchan);
463 else
464 atc_advance_work(atchan);
465
466 spin_unlock_irqrestore(&atchan->lock, flags);
467 }
468
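/*
 * Top half: read the masked interrupt status (EBCISR & EBCIMR), disable any
 * channel that reported an AHB error, and defer the rest of the work to the
 * per-channel tasklet.
 */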
469 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
470 {
471 struct at_dma *atdma = (struct at_dma *)dev_id;
472 struct at_dma_chan *atchan;
473 int i;
474 u32 status, pending, imr;
475 int ret = IRQ_NONE;
476
477 do {
478 imr = dma_readl(atdma, EBCIMR);
479 status = dma_readl(atdma, EBCISR);
480 pending = status & imr;
481
482 if (!pending)
483 break;
484
485 dev_vdbg(atdma->dma_common.dev,
486 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
487 status, imr, pending);
488
489 for (i = 0; i < atdma->dma_common.chancnt; i++) {
490 atchan = &atdma->chan[i];
491 if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
492 if (pending & AT_DMA_ERR(i)) {
493 /* Disable channel on AHB error */
494 dma_writel(atdma, CHDR,
495 AT_DMA_RES(i) | atchan->mask);
496 /* Give information to tasklet */
497 set_bit(ATC_IS_ERROR, &atchan->status);
498 }
499 tasklet_schedule(&atchan->tasklet);
500 ret = IRQ_HANDLED;
501 }
502 }
503
504 } while (pending);
505
506 return ret;
507 }
508
509
510 /*-- DMA Engine API --------------------------------------------------*/
511
512 /**
513 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
514 * @desc: descriptor at the head of the transaction chain
515 *
516 * Queue chain if DMA engine is working already
517 *
518 * Cookie increment and adding to active_list or queue must be atomic
519 */
520 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
521 {
522 struct at_desc *desc = txd_to_at_desc(tx);
523 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
524 dma_cookie_t cookie;
525 unsigned long flags;
526
527 spin_lock_irqsave(&atchan->lock, flags);
528 cookie = dma_cookie_assign(tx);
529
530 if (list_empty(&atchan->active_list)) {
531 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
532 desc->txd.cookie);
533 atc_dostart(atchan, desc);
534 list_add_tail(&desc->desc_node, &atchan->active_list);
535 } else {
536 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
537 desc->txd.cookie);
538 list_add_tail(&desc->desc_node, &atchan->queue);
539 }
540
541 spin_unlock_irqrestore(&atchan->lock, flags);
542
543 return cookie;
544 }
545
546 /**
547 * atc_prep_dma_memcpy - prepare a memcpy operation
548 * @chan: the channel to prepare operation on
549 * @dest: destination DMA (bus) address
550 * @src: source DMA (bus) address
551 * @len: operation length
552 * @flags: tx descriptor status flags
553 */
554 static struct dma_async_tx_descriptor *
555 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
556 size_t len, unsigned long flags)
557 {
558 struct at_dma_chan *atchan = to_at_dma_chan(chan);
559 struct at_desc *desc = NULL;
560 struct at_desc *first = NULL;
561 struct at_desc *prev = NULL;
562 size_t xfer_count;
563 size_t offset;
564 unsigned int src_width;
565 unsigned int dst_width;
566 u32 ctrla;
567 u32 ctrlb;
568
569 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
570 dest, src, len, flags);
571
572 if (unlikely(!len)) {
573 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
574 return NULL;
575 }
576
577 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
578 | ATC_SRC_ADDR_MODE_INCR
579 | ATC_DST_ADDR_MODE_INCR
580 | ATC_FC_MEM2MEM;
581
582 /*
583 * We can be a lot more clever here, but this should take care
584 * of the most common optimization.
585 */
586 if (!((src | dest | len) & 3)) {
587 ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
588 src_width = dst_width = 2;
589 } else if (!((src | dest | len) & 1)) {
590 ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
591 src_width = dst_width = 1;
592 } else {
593 ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
594 src_width = dst_width = 0;
595 }
596
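/* split the request into chunks of at most ATC_BTSIZE_MAX transfers
 * at the chosen transfer width */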
597 for (offset = 0; offset < len; offset += xfer_count << src_width) {
598 xfer_count = min_t(size_t, (len - offset) >> src_width,
599 ATC_BTSIZE_MAX);
600
601 desc = atc_desc_get(atchan);
602 if (!desc)
603 goto err_desc_get;
604
605 desc->lli.saddr = src + offset;
606 desc->lli.daddr = dest + offset;
607 desc->lli.ctrla = ctrla | xfer_count;
608 desc->lli.ctrlb = ctrlb;
609
610 desc->txd.cookie = 0;
611
612 atc_desc_chain(&first, &prev, desc);
613 }
614
615 /* First descriptor of the chain embeds additional information */
616 first->txd.cookie = -EBUSY;
617 first->len = len;
618
619 /* set end-of-link on the last link descriptor of the list */
620 set_desc_eol(desc);
621
622 first->txd.flags = flags; /* client is in control of this ack */
623
624 return &first->txd;
625
626 err_desc_get:
627 atc_desc_put(atchan, first);
628 return NULL;
629 }
630
631
632 /**
633 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
634 * @chan: DMA channel
635 * @sgl: scatterlist to transfer to/from
636 * @sg_len: number of entries in @sgl
637 * @direction: DMA direction
638 * @flags: tx descriptor status flags
639 * @context: transaction context (ignored)
640 */
641 static struct dma_async_tx_descriptor *
642 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
643 unsigned int sg_len, enum dma_transfer_direction direction,
644 unsigned long flags, void *context)
645 {
646 struct at_dma_chan *atchan = to_at_dma_chan(chan);
647 struct at_dma_slave *atslave = chan->private;
648 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
649 struct at_desc *first = NULL;
650 struct at_desc *prev = NULL;
651 u32 ctrla;
652 u32 ctrlb;
653 dma_addr_t reg;
654 unsigned int reg_width;
655 unsigned int mem_width;
656 unsigned int i;
657 struct scatterlist *sg;
658 size_t total_len = 0;
659
660 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
661 sg_len,
662 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
663 flags);
664
665 if (unlikely(!atslave || !sg_len)) {
666 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
667 return NULL;
668 }
669
670 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
671 | ATC_DCSIZE(sconfig->dst_maxburst);
672 ctrlb = ATC_IEN;
673
674 switch (direction) {
675 case DMA_MEM_TO_DEV:
676 reg_width = convert_buswidth(sconfig->dst_addr_width);
677 ctrla |= ATC_DST_WIDTH(reg_width);
678 ctrlb |= ATC_DST_ADDR_MODE_FIXED
679 | ATC_SRC_ADDR_MODE_INCR
680 | ATC_FC_MEM2PER
681 | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
682 reg = sconfig->dst_addr;
683 for_each_sg(sgl, sg, sg_len, i) {
684 struct at_desc *desc;
685 u32 len;
686 u32 mem;
687
688 desc = atc_desc_get(atchan);
689 if (!desc)
690 goto err_desc_get;
691
692 mem = sg_dma_address(sg);
693 len = sg_dma_len(sg);
694 if (unlikely(!len)) {
695 dev_dbg(chan2dev(chan),
696 "prep_slave_sg: sg(%d) data length is zero\n", i);
697 goto err;
698 }
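/* default to 32-bit memory accesses; fall back to byte accesses
 * when the buffer address or length is not word aligned */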
699 mem_width = 2;
700 if (unlikely(mem & 3 || len & 3))
701 mem_width = 0;
702
703 desc->lli.saddr = mem;
704 desc->lli.daddr = reg;
705 desc->lli.ctrla = ctrla
706 | ATC_SRC_WIDTH(mem_width)
707 | len >> mem_width;
708 desc->lli.ctrlb = ctrlb;
709
710 atc_desc_chain(&first, &prev, desc);
711 total_len += len;
712 }
713 break;
714 case DMA_DEV_TO_MEM:
715 reg_width = convert_buswidth(sconfig->src_addr_width);
716 ctrla |= ATC_SRC_WIDTH(reg_width);
717 ctrlb |= ATC_DST_ADDR_MODE_INCR
718 | ATC_SRC_ADDR_MODE_FIXED
719 | ATC_FC_PER2MEM
720 | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
721
722 reg = sconfig->src_addr;
723 for_each_sg(sgl, sg, sg_len, i) {
724 struct at_desc *desc;
725 u32 len;
726 u32 mem;
727
728 desc = atc_desc_get(atchan);
729 if (!desc)
730 goto err_desc_get;
731
732 mem = sg_dma_address(sg);
733 len = sg_dma_len(sg);
734 if (unlikely(!len)) {
735 dev_dbg(chan2dev(chan),
736 "prep_slave_sg: sg(%d) data length is zero\n", i);
737 goto err;
738 }
739 mem_width = 2;
740 if (unlikely(mem & 3 || len & 3))
741 mem_width = 0;
742
743 desc->lli.saddr = reg;
744 desc->lli.daddr = mem;
745 desc->lli.ctrla = ctrla
746 | ATC_DST_WIDTH(mem_width)
747 | len >> reg_width;
748 desc->lli.ctrlb = ctrlb;
749
750 atc_desc_chain(&first, &prev, desc);
751 total_len += len;
752 }
753 break;
754 default:
755 return NULL;
756 }
757
758 /* set end-of-link on the last link descriptor of the list */
759 set_desc_eol(prev);
760
761 /* First descriptor of the chain embeds additional information */
762 first->txd.cookie = -EBUSY;
763 first->len = total_len;
764
765 /* the first link descriptor of the list is responsible for the flags */
766 first->txd.flags = flags; /* client is in control of this ack */
767
768 return &first->txd;
769
770 err_desc_get:
771 dev_err(chan2dev(chan), "not enough descriptors available\n");
772 err:
773 atc_desc_put(atchan, first);
774 return NULL;
775 }
776
777 /**
778 * atc_dma_cyclic_check_values
779 * Check for too big/unaligned periods and unaligned DMA buffer
780 */
781 static int
782 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
783 size_t period_len)
784 {
785 if (period_len > (ATC_BTSIZE_MAX << reg_width))
786 goto err_out;
787 if (unlikely(period_len & ((1 << reg_width) - 1)))
788 goto err_out;
789 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
790 goto err_out;
791
792 return 0;
793
794 err_out:
795 return -EINVAL;
796 }
797
798 /**
799 * atc_dma_cyclic_fill_desc - Fill one period descriptor
800 */
801 static int
802 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
803 unsigned int period_index, dma_addr_t buf_addr,
804 unsigned int reg_width, size_t period_len,
805 enum dma_transfer_direction direction)
806 {
807 struct at_dma_chan *atchan = to_at_dma_chan(chan);
808 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
809 u32 ctrla;
810
811 /* prepare common CTRLA value */
812 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
813 | ATC_DCSIZE(sconfig->dst_maxburst)
814 | ATC_DST_WIDTH(reg_width)
815 | ATC_SRC_WIDTH(reg_width)
816 | period_len >> reg_width;
817
818 switch (direction) {
819 case DMA_MEM_TO_DEV:
820 desc->lli.saddr = buf_addr + (period_len * period_index);
821 desc->lli.daddr = sconfig->dst_addr;
822 desc->lli.ctrla = ctrla;
823 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
824 | ATC_SRC_ADDR_MODE_INCR
825 | ATC_FC_MEM2PER
826 | ATC_SIF(atchan->mem_if)
827 | ATC_DIF(atchan->per_if);
828 break;
829
830 case DMA_DEV_TO_MEM:
831 desc->lli.saddr = sconfig->src_addr;
832 desc->lli.daddr = buf_addr + (period_len * period_index);
833 desc->lli.ctrla = ctrla;
834 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
835 | ATC_SRC_ADDR_MODE_FIXED
836 | ATC_FC_PER2MEM
837 | ATC_SIF(atchan->per_if)
838 | ATC_DIF(atchan->mem_if);
839 break;
840
841 default:
842 return -EINVAL;
843 }
844
845 return 0;
846 }
847
848 /**
849 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
850 * @chan: the DMA channel to prepare
851 * @buf_addr: physical DMA address where the buffer starts
852 * @buf_len: total number of bytes for the entire buffer
853 * @period_len: number of bytes for each period
854 * @direction: transfer direction, to or from device
855 * @flags: tx descriptor status flags
856 * @context: transfer context (ignored)
857 */
858 static struct dma_async_tx_descriptor *
859 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
860 size_t period_len, enum dma_transfer_direction direction,
861 unsigned long flags, void *context)
862 {
863 struct at_dma_chan *atchan = to_at_dma_chan(chan);
864 struct at_dma_slave *atslave = chan->private;
865 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
866 struct at_desc *first = NULL;
867 struct at_desc *prev = NULL;
868 unsigned long was_cyclic;
869 unsigned int reg_width;
870 unsigned int periods = buf_len / period_len;
871 unsigned int i;
872
873 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
874 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
875 buf_addr,
876 periods, buf_len, period_len);
877
878 if (unlikely(!atslave || !buf_len || !period_len)) {
879 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
880 return NULL;
881 }
882
883 was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
884 if (was_cyclic) {
885 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
886 return NULL;
887 }
888
889 if (unlikely(!is_slave_direction(direction)))
890 goto err_out;
891
892 if (sconfig->direction == DMA_MEM_TO_DEV)
893 reg_width = convert_buswidth(sconfig->dst_addr_width);
894 else
895 reg_width = convert_buswidth(sconfig->src_addr_width);
896
897 /* Check for too big/unaligned periods and unaligned DMA buffer */
898 if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
899 goto err_out;
900
901 /* build cyclic linked list */
902 for (i = 0; i < periods; i++) {
903 struct at_desc *desc;
904
905 desc = atc_desc_get(atchan);
906 if (!desc)
907 goto err_desc_get;
908
909 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
910 reg_width, period_len, direction))
911 goto err_desc_get;
912
913 atc_desc_chain(&first, &prev, desc);
914 }
915
916 /* let's make a cyclic list */
917 prev->lli.dscr = first->txd.phys;
918
919 /* First descriptor of the chain embeds additional information */
920 first->txd.cookie = -EBUSY;
921 first->len = buf_len;
922
923 return &first->txd;
924
925 err_desc_get:
926 dev_err(chan2dev(chan), "not enough descriptors available\n");
927 atc_desc_put(atchan, first);
928 err_out:
929 clear_bit(ATC_IS_CYCLIC, &atchan->status);
930 return NULL;
931 }
932
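/*
 * Cache the dma_slave_config for this channel and convert the maxburst
 * values from a transfer count into the controller's register encoding.
 */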
933 static int set_runtime_config(struct dma_chan *chan,
934 struct dma_slave_config *sconfig)
935 {
936 struct at_dma_chan *atchan = to_at_dma_chan(chan);
937
938 /* Check if chan is configured for slave transfers */
939 if (!chan->private)
940 return -EINVAL;
941
942 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
943
944 convert_burst(&atchan->dma_sconfig.src_maxburst);
945 convert_burst(&atchan->dma_sconfig.dst_maxburst);
946
947 return 0;
948 }
949
950
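/*
 * Channel control entry point: handles DMA_PAUSE, DMA_RESUME,
 * DMA_TERMINATE_ALL and DMA_SLAVE_CONFIG requests from the dmaengine core.
 */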
951 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
952 unsigned long arg)
953 {
954 struct at_dma_chan *atchan = to_at_dma_chan(chan);
955 struct at_dma *atdma = to_at_dma(chan->device);
956 int chan_id = atchan->chan_common.chan_id;
957 unsigned long flags;
958
959 LIST_HEAD(list);
960
961 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
962
963 if (cmd == DMA_PAUSE) {
964 spin_lock_irqsave(&atchan->lock, flags);
965
966 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
967 set_bit(ATC_IS_PAUSED, &atchan->status);
968
969 spin_unlock_irqrestore(&atchan->lock, flags);
970 } else if (cmd == DMA_RESUME) {
971 if (!atc_chan_is_paused(atchan))
972 return 0;
973
974 spin_lock_irqsave(&atchan->lock, flags);
975
976 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
977 clear_bit(ATC_IS_PAUSED, &atchan->status);
978
979 spin_unlock_irqrestore(&atchan->lock, flags);
980 } else if (cmd == DMA_TERMINATE_ALL) {
981 struct at_desc *desc, *_desc;
982 /*
983 * This is only called when something went wrong elsewhere, so
984 * we don't really care about the data. Just disable the
985 * channel. We still have to poll the channel enable bit due
986 * to AHB/HSB limitations.
987 */
988 spin_lock_irqsave(&atchan->lock, flags);
989
990 /* disabling channel: must also remove suspend state */
991 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
992
993 /* confirm that this channel is disabled */
994 while (dma_readl(atdma, CHSR) & atchan->mask)
995 cpu_relax();
996
997 /* active_list entries will end up before queued entries */
998 list_splice_init(&atchan->queue, &list);
999 list_splice_init(&atchan->active_list, &list);
1000
1001 /* Flush all pending and queued descriptors */
1002 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1003 atc_chain_complete(atchan, desc);
1004
1005 clear_bit(ATC_IS_PAUSED, &atchan->status);
1006 /* if channel dedicated to cyclic operations, free it */
1007 clear_bit(ATC_IS_CYCLIC, &atchan->status);
1008
1009 spin_unlock_irqrestore(&atchan->lock, flags);
1010 } else if (cmd == DMA_SLAVE_CONFIG) {
1011 return set_runtime_config(chan, (struct dma_slave_config *)arg);
1012 } else {
1013 return -ENXIO;
1014 }
1015
1016 return 0;
1017 }
1018
1019 /**
1020 * atc_tx_status - poll for transaction completion
1021 * @chan: DMA channel
1022 * @cookie: transaction identifier to check status of
1023 * @txstate: if not %NULL updated with transaction state
1024 *
1025 * If @txstate is passed in, upon return it reflects the driver
1026 * internal state and can be used with dma_async_is_complete() to check
1027 * the status of multiple cookies without re-checking hardware state.
1028 */
1029 static enum dma_status
1030 atc_tx_status(struct dma_chan *chan,
1031 dma_cookie_t cookie,
1032 struct dma_tx_state *txstate)
1033 {
1034 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1035 dma_cookie_t last_used;
1036 dma_cookie_t last_complete;
1037 unsigned long flags;
1038 enum dma_status ret;
1039
1040 spin_lock_irqsave(&atchan->lock, flags);
1041
1042 ret = dma_cookie_status(chan, cookie, txstate);
1043 if (ret != DMA_SUCCESS) {
1044 atc_cleanup_descriptors(atchan);
1045
1046 ret = dma_cookie_status(chan, cookie, txstate);
1047 }
1048
1049 last_complete = chan->completed_cookie;
1050 last_used = chan->cookie;
1051
1052 spin_unlock_irqrestore(&atchan->lock, flags);
1053
1054 if (ret != DMA_SUCCESS)
1055 dma_set_residue(txstate, atc_first_active(atchan)->len);
1056
1057 if (atc_chan_is_paused(atchan))
1058 ret = DMA_PAUSED;
1059
1060 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
1061 ret, cookie, last_complete ? last_complete : 0,
1062 last_used ? last_used : 0);
1063
1064 return ret;
1065 }
1066
1067 /**
1068 * atc_issue_pending - try to finish work
1069 * @chan: target DMA channel
1070 */
1071 static void atc_issue_pending(struct dma_chan *chan)
1072 {
1073 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1074 unsigned long flags;
1075
1076 dev_vdbg(chan2dev(chan), "issue_pending\n");
1077
1078 /* Not needed for cyclic transfers */
1079 if (atc_chan_is_cyclic(atchan))
1080 return;
1081
1082 spin_lock_irqsave(&atchan->lock, flags);
1083 atc_advance_work(atchan);
1084 spin_unlock_irqrestore(&atchan->lock, flags);
1085 }
1086
1087 /**
1088 * atc_alloc_chan_resources - allocate resources for DMA channel
1089 * @chan: allocate descriptor resources for this channel
1091 *
1092 * return - the number of allocated descriptors
1093 */
1094 static int atc_alloc_chan_resources(struct dma_chan *chan)
1095 {
1096 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1097 struct at_dma *atdma = to_at_dma(chan->device);
1098 struct at_desc *desc;
1099 struct at_dma_slave *atslave;
1100 unsigned long flags;
1101 int i;
1102 u32 cfg;
1103 LIST_HEAD(tmp_list);
1104
1105 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1106
1107 /* ASSERT: channel is idle */
1108 if (atc_chan_is_enabled(atchan)) {
1109 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
1110 return -EIO;
1111 }
1112
1113 cfg = ATC_DEFAULT_CFG;
1114
1115 atslave = chan->private;
1116 if (atslave) {
1117 /*
1118 * We need controller-specific data to set up slave
1119 * transfers.
1120 */
1121 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1122
1123 /* if a cfg configuration is specified, take it instead of the default */
1124 if (atslave->cfg)
1125 cfg = atslave->cfg;
1126 }
1127
1128 /* have we already been set up?
1129 * if so, reconfigure the channel but do not reallocate descriptors */
1130 if (!list_empty(&atchan->free_list))
1131 return atchan->descs_allocated;
1132
1133 /* Allocate initial pool of descriptors */
1134 for (i = 0; i < init_nr_desc_per_channel; i++) {
1135 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1136 if (!desc) {
1137 dev_err(atdma->dma_common.dev,
1138 "Only %d initial descriptors\n", i);
1139 break;
1140 }
1141 list_add_tail(&desc->desc_node, &tmp_list);
1142 }
1143
1144 spin_lock_irqsave(&atchan->lock, flags);
1145 atchan->descs_allocated = i;
1146 list_splice(&tmp_list, &atchan->free_list);
1147 dma_cookie_init(chan);
1148 spin_unlock_irqrestore(&atchan->lock, flags);
1149
1150 /* channel parameters */
1151 channel_writel(atchan, CFG, cfg);
1152
1153 dev_dbg(chan2dev(chan),
1154 "alloc_chan_resources: allocated %d descriptors\n",
1155 atchan->descs_allocated);
1156
1157 return atchan->descs_allocated;
1158 }
1159
1160 /**
1161 * atc_free_chan_resources - free all channel resources
1162 * @chan: DMA channel
1163 */
1164 static void atc_free_chan_resources(struct dma_chan *chan)
1165 {
1166 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1167 struct at_dma *atdma = to_at_dma(chan->device);
1168 struct at_desc *desc, *_desc;
1169 LIST_HEAD(list);
1170
1171 dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
1172 atchan->descs_allocated);
1173
1174 /* ASSERT: channel is idle */
1175 BUG_ON(!list_empty(&atchan->active_list));
1176 BUG_ON(!list_empty(&atchan->queue));
1177 BUG_ON(atc_chan_is_enabled(atchan));
1178
1179 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1180 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1181 list_del(&desc->desc_node);
1182 /* free link descriptor */
1183 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1184 }
1185 list_splice_init(&atchan->free_list, &list);
1186 atchan->descs_allocated = 0;
1187 atchan->status = 0;
1188
1189 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1190 }
1191
1192 #ifdef CONFIG_OF
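/*
 * Filter used with dma_request_channel(): accept only channels that belong
 * to this controller and attach the at_dma_slave data to the channel.
 */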
1193 static bool at_dma_filter(struct dma_chan *chan, void *slave)
1194 {
1195 struct at_dma_slave *atslave = slave;
1196
1197 if (atslave->dma_dev == chan->device->dev) {
1198 chan->private = atslave;
1199 return true;
1200 } else {
1201 return false;
1202 }
1203 }
1204
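/*
 * Translate a two-cell DT DMA specifier into a channel. The first cell
 * selects the AHB interfaces (peripheral interface in bits 7:0, memory
 * interface in bits 23:16); the second cell is the peripheral hardware
 * handshaking identifier, programmed into both the SRC_PER and DST_PER
 * fields, including their MSB extensions.
 */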
1205 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1206 struct of_dma *of_dma)
1207 {
1208 struct dma_chan *chan;
1209 struct at_dma_chan *atchan;
1210 struct at_dma_slave *atslave;
1211 dma_cap_mask_t mask;
1212 unsigned int per_id;
1213 struct platform_device *dmac_pdev;
1214
1215 if (dma_spec->args_count != 2)
1216 return NULL;
1217
1218 dmac_pdev = of_find_device_by_node(dma_spec->np);
1219
1220 dma_cap_zero(mask);
1221 dma_cap_set(DMA_SLAVE, mask);
1222
1223 atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
1224 if (!atslave)
1225 return NULL;
1226 /*
1227 * We can fill both SRC_PER and DST_PER, one of these fields will be
1228 * ignored depending on DMA transfer direction.
1229 */
1230 per_id = dma_spec->args[1];
1231 atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW
1232 | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id)
1233 | ATC_DST_PER_MSB(per_id)
1234 | ATC_SRC_PER_MSB(per_id)
1235 | ATC_SRC_PER(per_id);
1236 atslave->dma_dev = &dmac_pdev->dev;
1237
1238 chan = dma_request_channel(mask, at_dma_filter, atslave);
1239 if (!chan)
1240 return NULL;
1241
1242 atchan = to_at_dma_chan(chan);
1243 atchan->per_if = dma_spec->args[0] & 0xff;
1244 atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1245
1246 return chan;
1247 }
1248 #else
1249 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1250 struct of_dma *of_dma)
1251 {
1252 return NULL;
1253 }
1254 #endif
1255
1256 /*-- Module Management -----------------------------------------------*/
1257
1258 /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
1259 static struct at_dma_platform_data at91sam9rl_config = {
1260 .nr_channels = 2,
1261 };
1262 static struct at_dma_platform_data at91sam9g45_config = {
1263 .nr_channels = 8,
1264 };
1265
1266 #if defined(CONFIG_OF)
1267 static const struct of_device_id atmel_dma_dt_ids[] = {
1268 {
1269 .compatible = "atmel,at91sam9rl-dma",
1270 .data = &at91sam9rl_config,
1271 }, {
1272 .compatible = "atmel,at91sam9g45-dma",
1273 .data = &at91sam9g45_config,
1274 }, {
1275 /* sentinel */
1276 }
1277 };
1278
1279 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1280 #endif
1281
1282 static const struct platform_device_id atdma_devtypes[] = {
1283 {
1284 .name = "at91sam9rl_dma",
1285 .driver_data = (unsigned long) &at91sam9rl_config,
1286 }, {
1287 .name = "at91sam9g45_dma",
1288 .driver_data = (unsigned long) &at91sam9g45_config,
1289 }, {
1290 /* sentinel */
1291 }
1292 };
1293
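/*
 * Pick the platform data for this controller, either from the OF match
 * table (DT boot) or from the platform_device_id table (legacy boot).
 */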
1294 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1295 struct platform_device *pdev)
1296 {
1297 if (pdev->dev.of_node) {
1298 const struct of_device_id *match;
1299 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1300 if (match == NULL)
1301 return NULL;
1302 return match->data;
1303 }
1304 return (struct at_dma_platform_data *)
1305 platform_get_device_id(pdev)->driver_data;
1306 }
1307
1308 /**
1309 * at_dma_off - disable DMA controller
1310 * @atdma: the Atmel HDMAC device
1311 */
1312 static void at_dma_off(struct at_dma *atdma)
1313 {
1314 dma_writel(atdma, EN, 0);
1315
1316 /* disable all interrupts */
1317 dma_writel(atdma, EBCIDR, -1L);
1318
1319 /* confirm that all channels are disabled */
1320 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1321 cpu_relax();
1322 }
1323
1324 static int __init at_dma_probe(struct platform_device *pdev)
1325 {
1326 struct resource *io;
1327 struct at_dma *atdma;
1328 size_t size;
1329 int irq;
1330 int err;
1331 int i;
1332 const struct at_dma_platform_data *plat_dat;
1333
1334 /* setup platform data for each SoC */
1335 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1336 dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1337 dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1338
1339 /* get DMA parameters from controller type */
1340 plat_dat = at_dma_get_driver_data(pdev);
1341 if (!plat_dat)
1342 return -ENODEV;
1343
1344 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1345 if (!io)
1346 return -EINVAL;
1347
1348 irq = platform_get_irq(pdev, 0);
1349 if (irq < 0)
1350 return irq;
1351
1352 size = sizeof(struct at_dma);
1353 size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1354 atdma = kzalloc(size, GFP_KERNEL);
1355 if (!atdma)
1356 return -ENOMEM;
1357
1358 /* discover transaction capabilities */
1359 atdma->dma_common.cap_mask = plat_dat->cap_mask;
1360 atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1361
1362 size = resource_size(io);
1363 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1364 err = -EBUSY;
1365 goto err_kfree;
1366 }
1367
1368 atdma->regs = ioremap(io->start, size);
1369 if (!atdma->regs) {
1370 err = -ENOMEM;
1371 goto err_release_r;
1372 }
1373
1374 atdma->clk = clk_get(&pdev->dev, "dma_clk");
1375 if (IS_ERR(atdma->clk)) {
1376 err = PTR_ERR(atdma->clk);
1377 goto err_clk;
1378 }
1379 clk_enable(atdma->clk);
1380
1381 /* force dma off, just in case */
1382 at_dma_off(atdma);
1383
1384 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1385 if (err)
1386 goto err_irq;
1387
1388 platform_set_drvdata(pdev, atdma);
1389
1390 /* create a pool of consistent memory blocks for hardware descriptors */
1391 atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1392 &pdev->dev, sizeof(struct at_desc),
1393 4 /* word alignment */, 0);
1394 if (!atdma->dma_desc_pool) {
1395 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1396 err = -ENOMEM;
1397 goto err_pool_create;
1398 }
1399
1400 /* clear any pending interrupt */
1401 while (dma_readl(atdma, EBCISR))
1402 cpu_relax();
1403
1404 /* initialize channels related values */
1405 INIT_LIST_HEAD(&atdma->dma_common.channels);
1406 for (i = 0; i < plat_dat->nr_channels; i++) {
1407 struct at_dma_chan *atchan = &atdma->chan[i];
1408
1409 atchan->mem_if = AT_DMA_MEM_IF;
1410 atchan->per_if = AT_DMA_PER_IF;
1411 atchan->chan_common.device = &atdma->dma_common;
1412 dma_cookie_init(&atchan->chan_common);
1413 list_add_tail(&atchan->chan_common.device_node,
1414 &atdma->dma_common.channels);
1415
1416 atchan->ch_regs = atdma->regs + ch_regs(i);
1417 spin_lock_init(&atchan->lock);
1418 atchan->mask = 1 << i;
1419
1420 INIT_LIST_HEAD(&atchan->active_list);
1421 INIT_LIST_HEAD(&atchan->queue);
1422 INIT_LIST_HEAD(&atchan->free_list);
1423
1424 tasklet_init(&atchan->tasklet, atc_tasklet,
1425 (unsigned long)atchan);
1426 atc_enable_chan_irq(atdma, i);
1427 }
1428
1429 /* set base routines */
1430 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1431 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1432 atdma->dma_common.device_tx_status = atc_tx_status;
1433 atdma->dma_common.device_issue_pending = atc_issue_pending;
1434 atdma->dma_common.dev = &pdev->dev;
1435
1436 /* set prep routines based on capability */
1437 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1438 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1439
1440 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1441 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1442 /* controller can do slave DMA: can trigger cyclic transfers */
1443 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1444 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1445 atdma->dma_common.device_control = atc_control;
1446 }
1447
1448 dma_writel(atdma, EN, AT_DMA_ENABLE);
1449
1450 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
1451 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1452 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1453 plat_dat->nr_channels);
1454
1455 dma_async_device_register(&atdma->dma_common);
1456
1457 /*
1458 * Do not return an error if the dmac node is not present in order to
1459 * not break the existing way of requesting channel with
1460 * dma_request_channel().
1461 */
1462 if (pdev->dev.of_node) {
1463 err = of_dma_controller_register(pdev->dev.of_node,
1464 at_dma_xlate, atdma);
1465 if (err) {
1466 dev_err(&pdev->dev, "could not register of_dma_controller\n");
1467 goto err_of_dma_controller_register;
1468 }
1469 }
1470
1471 return 0;
1472
1473 err_of_dma_controller_register:
1474 dma_async_device_unregister(&atdma->dma_common);
1475 dma_pool_destroy(atdma->dma_desc_pool);
1476 err_pool_create:
1477 platform_set_drvdata(pdev, NULL);
1478 free_irq(platform_get_irq(pdev, 0), atdma);
1479 err_irq:
1480 clk_disable(atdma->clk);
1481 clk_put(atdma->clk);
1482 err_clk:
1483 iounmap(atdma->regs);
1484 atdma->regs = NULL;
1485 err_release_r:
1486 release_mem_region(io->start, size);
1487 err_kfree:
1488 kfree(atdma);
1489 return err;
1490 }
1491
1492 static int at_dma_remove(struct platform_device *pdev)
1493 {
1494 struct at_dma *atdma = platform_get_drvdata(pdev);
1495 struct dma_chan *chan, *_chan;
1496 struct resource *io;
1497
1498 at_dma_off(atdma);
1499 dma_async_device_unregister(&atdma->dma_common);
1500
1501 dma_pool_destroy(atdma->dma_desc_pool);
1502 platform_set_drvdata(pdev, NULL);
1503 free_irq(platform_get_irq(pdev, 0), atdma);
1504
1505 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1506 device_node) {
1507 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1508
1509 /* Disable interrupts */
1510 atc_disable_chan_irq(atdma, chan->chan_id);
1511 tasklet_disable(&atchan->tasklet);
1512
1513 tasklet_kill(&atchan->tasklet);
1514 list_del(&chan->device_node);
1515 }
1516
1517 clk_disable(atdma->clk);
1518 clk_put(atdma->clk);
1519
1520 iounmap(atdma->regs);
1521 atdma->regs = NULL;
1522
1523 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1524 release_mem_region(io->start, resource_size(io));
1525
1526 kfree(atdma);
1527
1528 return 0;
1529 }
1530
1531 static void at_dma_shutdown(struct platform_device *pdev)
1532 {
1533 struct at_dma *atdma = platform_get_drvdata(pdev);
1534
1535 at_dma_off(platform_get_drvdata(pdev));
1536 clk_disable(atdma->clk);
1537 }
1538
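/*
 * PM .prepare callback: refuse to enter suspend while a non-cyclic
 * transfer is still running on any channel.
 */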
1539 static int at_dma_prepare(struct device *dev)
1540 {
1541 struct platform_device *pdev = to_platform_device(dev);
1542 struct at_dma *atdma = platform_get_drvdata(pdev);
1543 struct dma_chan *chan, *_chan;
1544
1545 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1546 device_node) {
1547 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1548 /* wait for transaction completion (except in cyclic case) */
1549 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
1550 return -EAGAIN;
1551 }
1552 return 0;
1553 }
1554
1555 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1556 {
1557 struct dma_chan *chan = &atchan->chan_common;
1558
1559 /* Channel should be paused by the user;
1560 * do it anyway even if it has not been done already */
1561 if (!atc_chan_is_paused(atchan)) {
1562 dev_warn(chan2dev(chan),
1563 "cyclic channel not paused, should be done by channel user\n");
1564 atc_control(chan, DMA_PAUSE, 0);
1565 }
1566
1567 /* now preserve additional data for cyclic operations */
1568 /* next descriptor address in the cyclic list */
1569 atchan->save_dscr = channel_readl(atchan, DSCR);
1570
1571 vdbg_dump_regs(atchan);
1572 }
1573
1574 static int at_dma_suspend_noirq(struct device *dev)
1575 {
1576 struct platform_device *pdev = to_platform_device(dev);
1577 struct at_dma *atdma = platform_get_drvdata(pdev);
1578 struct dma_chan *chan, *_chan;
1579
1580 /* preserve data */
1581 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1582 device_node) {
1583 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1584
1585 if (atc_chan_is_cyclic(atchan))
1586 atc_suspend_cyclic(atchan);
1587 atchan->save_cfg = channel_readl(atchan, CFG);
1588 }
1589 atdma->save_imr = dma_readl(atdma, EBCIMR);
1590
1591 /* disable DMA controller */
1592 at_dma_off(atdma);
1593 clk_disable(atdma->clk);
1594 return 0;
1595 }
1596
1597 static void atc_resume_cyclic(struct at_dma_chan *atchan)
1598 {
1599 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
1600
1601 /* restore channel status for cyclic descriptors list:
1602 * next descriptor in the cyclic list at the time of suspend */
1603 channel_writel(atchan, SADDR, 0);
1604 channel_writel(atchan, DADDR, 0);
1605 channel_writel(atchan, CTRLA, 0);
1606 channel_writel(atchan, CTRLB, 0);
1607 channel_writel(atchan, DSCR, atchan->save_dscr);
1608 dma_writel(atdma, CHER, atchan->mask);
1609
1610 /* channel pause status should be removed by the channel user;
1611 * we cannot take the initiative to do it here */
1612
1613 vdbg_dump_regs(atchan);
1614 }
1615
1616 static int at_dma_resume_noirq(struct device *dev)
1617 {
1618 struct platform_device *pdev = to_platform_device(dev);
1619 struct at_dma *atdma = platform_get_drvdata(pdev);
1620 struct dma_chan *chan, *_chan;
1621
1622 /* bring back DMA controller */
1623 clk_enable(atdma->clk);
1624 dma_writel(atdma, EN, AT_DMA_ENABLE);
1625
1626 /* clear any pending interrupt */
1627 while (dma_readl(atdma, EBCISR))
1628 cpu_relax();
1629
1630 /* restore saved data */
1631 dma_writel(atdma, EBCIER, atdma->save_imr);
1632 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1633 device_node) {
1634 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1635
1636 channel_writel(atchan, CFG, atchan->save_cfg);
1637 if (atc_chan_is_cyclic(atchan))
1638 atc_resume_cyclic(atchan);
1639 }
1640 return 0;
1641 }
1642
1643 static const struct dev_pm_ops at_dma_dev_pm_ops = {
1644 .prepare = at_dma_prepare,
1645 .suspend_noirq = at_dma_suspend_noirq,
1646 .resume_noirq = at_dma_resume_noirq,
1647 };
1648
1649 static struct platform_driver at_dma_driver = {
1650 .remove = at_dma_remove,
1651 .shutdown = at_dma_shutdown,
1652 .id_table = atdma_devtypes,
1653 .driver = {
1654 .name = "at_hdmac",
1655 .pm = &at_dma_dev_pm_ops,
1656 .of_match_table = of_match_ptr(atmel_dma_dt_ids),
1657 },
1658 };
1659
1660 static int __init at_dma_init(void)
1661 {
1662 return platform_driver_probe(&at_dma_driver, at_dma_probe);
1663 }
1664 subsys_initcall(at_dma_init);
1665
1666 static void __exit at_dma_exit(void)
1667 {
1668 platform_driver_unregister(&at_dma_driver);
1669 }
1670 module_exit(at_dma_exit);
1671
1672 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
1673 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
1674 MODULE_LICENSE("GPL");
1675 MODULE_ALIAS("platform:at_hdmac");