/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

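/*
 * Illustrative client usage (a sketch, not part of this driver; the filter
 * function name is a client-side assumption):
 *
 *	struct ep93xx_dma_data data = {
 *		.port		= EP93XX_DMA_SSP,
 *		.direction	= DMA_MEM_TO_DEV,
 *		.name		= "ssp-tx",
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &data);
 *
 * where my_filter() is a client-provided filter that stores @data in
 * chan->private for the channel it accepts. The channel is then driven
 * through the normal dmaengine API (dmaengine_slave_config(),
 * prep_slave_sg(), issue_pending() and so on).
 */
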
/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_MASK		6

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

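/*
 * Note: DMA_MAX_CHAN_BYTES presumably matches the maximum value of the
 * controller's 16-bit byte count registers (MAXCNTx/BCRx above), while
 * DMA_MAX_CHAN_DESCRIPTORS is simply a software limit chosen by this driver.
 */
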
struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	if (list_empty(&edmac->active))
		return NULL;

	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Advances the active descriptor to the next one in @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode, always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}
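
/*
 * For example, for a flattened chain [first, d1, d2] only first->txd.cookie
 * is non-zero (it is assigned at submit time; the chained descriptors keep
 * cookie == 0). Successive rotations make the head d1, then d2, and finally
 * first again, at which point the non-zero cookie tells us the whole chain
 * has been processed.
 */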

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * a write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}
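
/*
 * Note: M2P transfers are double-buffered: MAXCNT0/BASE0 and MAXCNT1/BASE1
 * form two buffer descriptor slots which m2p_fill_desc() fills alternately
 * (tracked by @edmac->buffer), so the next buffer can be programmed while
 * the current one is still in flight; the hardware raises NFB when it is
 * ready for the next buffer.
 */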

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client, so we just report the error here and
		 * continue as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie : %d\n"
			"\tsrc_addr : 0x%08x\n"
			"\tdst_addr : 0x%08x\n"
			"\tsize : %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * Even the latest E2 silicon revision sometimes asserts the STALL
	 * interrupt instead of NFB. Therefore we treat them equally, based
	 * on the amount of data we still have to transfer.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found by experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get the DONE interrupt
		 * then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * According to EP93xx User's Guide, we should receive a DONE interrupt when
 * all M2M DMA controller transactions complete normally. This is not always
 * the case - sometimes EP93xx M2M DMA asserts the DONE interrupt when the
 * DMA channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
 * channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
 * operation). In effect, disabling the channel when only the DONE bit is set
 * could stop the currently running DMA transfer. To avoid this, we use the
 * Buffer FSM and Control FSM to check the current state of the DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines the action to take in the
	 * interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use the M2M DMA Buffer FSM and Control FSM to check the current
	 * state of the DMA channel. Using the DONE and NFB bits from the
	 * channel status register or bits from the channel interrupt register
	 * is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when the Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when the Buffer FSM is in DMA_NO_BUF state
	 * and the Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	/*
	 * If dmaengine_terminate_all() was called before we get to run, the
	 * active list has become empty. If that happens we aren't supposed
	 * to do anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns
 * a cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently being processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer
 * terminates only when the client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}
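
/*
 * The values saved above are consumed later: prep_slave_sg() and
 * prep_dma_cyclic() use @runtime_addr as the device address, and
 * m2m_hw_submit() ORs @runtime_ctrl (the PW bits) into the M2M control
 * register.
 */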

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");