/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
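/*
 * Clients drive this engine through the generic dmaengine API. A minimal
 * slave-transfer sketch (channel name and buffer parameters below are
 * illustrative, not part of this driver):
 *
 *	chan = dma_request_chan(dev, "rx");
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM, 0);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */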
#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
#define DMA_DCR		0x00	/* Control Register */
#define DMA_DISR	0x04	/* Interrupt status Register */
#define DMA_DIMR	0x08	/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c	/* Burst timeout status Register */
#define DMA_DRTOSR	0x10	/* Request timeout Register */
#define DMA_DSESR	0x14	/* Transfer Error Status Register */
#define DMA_DBOSR	0x18	/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c	/* Burst timeout control Register */
#define DMA_WSRA	0x40	/* W-Size Register A */
#define DMA_XSRA	0x44	/* X-Size Register A */
#define DMA_YSRA	0x48	/* Y-Size Register A */
#define DMA_WSRB	0x4c	/* W-Size Register B */
#define DMA_XSRB	0x50	/* X-Size Register B */
#define DMA_YSRB	0x54	/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */
#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)
enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16	xsr;
	u16	ysr;
	u16	wsr;
	int	count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};
146
147 struct imxdma_channel {
148 int hw_chaining;
149 struct timer_list watchdog;
150 struct imxdma_engine *imxdma;
151 unsigned int channel;
152
153 struct tasklet_struct dma_tasklet;
154 struct list_head ld_free;
155 struct list_head ld_queue;
156 struct list_head ld_active;
157 int descs_allocated;
158 enum dma_slave_buswidth word_size;
159 dma_addr_t per_address;
160 u32 watermark_level;
161 struct dma_chan chan;
162 struct dma_async_tx_descriptor desc;
163 enum dma_status status;
164 int dma_request;
165 struct scatterlist *sg_list;
166 u32 ccr_from_device;
167 u32 ccr_to_device;
168 bool enabled_2d;
169 int slot_2d;
170 unsigned int irq;
171 };
172
173 enum imx_dma_type {
174 IMX1_DMA,
175 IMX21_DMA,
176 IMX27_DMA,
177 };
178
179 struct imxdma_engine {
180 struct device *dev;
181 struct device_dma_parameters dma_parms;
182 struct dma_device dma_device;
183 void __iomem *base;
184 struct clk *dma_ahb;
185 struct clk *dma_ipg;
186 spinlock_t lock;
187 struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
188 struct imxdma_channel channel[IMX_DMA_CHANNELS];
189 enum imx_dma_type devtype;
190 unsigned int irq;
191 unsigned int irq_err;
192
193 };
194
195 struct imxdma_filter_data {
196 struct imxdma_engine *imxdma;
197 int request;
198 };
199
static const struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma",
		.data = &imx_dma_devtype[IMX1_DMA],
	}, {
		.compatible = "fsl,imx21-dma",
		.data = &imx_dma_devtype[IMX21_DMA],
	}, {
		.compatible = "fsl,imx27-dma",
		.data = &imx_dma_devtype[IMX27_DMA],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);
static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

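/*
 * All engine register accesses funnel through these helpers.
 * __raw_writel()/__raw_readl() perform no byte swapping and imply no
 * memory barriers.
 */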
static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	unsigned long now;

	now = min(d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}

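/*
 * Start the channel: acknowledge any stale interrupt, unmask the channel
 * interrupt and set CCR_CEN (plus CCR_ACRPT so a chained repeat can be
 * accepted). With hardware chaining on i.MX27, the next scatter-gather
 * chunk is pre-programmed and the CCR repeat bits are armed.
 */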
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

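/*
 * Stop the channel: mask its interrupt, clear CCR_CEN and acknowledge
 * any pending interrupt. A running chain watchdog is cancelled first.
 */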
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

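/*
 * Watchdog for hardware-chained transfers: if the completion interrupt
 * never arrives, forcibly stop the channel and let the tasklet finish
 * the descriptor.
 */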
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

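/*
 * Error interrupt handler: decode the four error status registers
 * (burst timeout, request timeout, transfer error, buffer overflow),
 * acknowledge the offending channels and schedule their tasklets.
 */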
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR) |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ?    " burst"    : "",
			 errcode & IMX_DMA_ERR_REQUEST ?  " request"  : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer"   : "");
	}
	return IRQ_HANDLED;
}

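/*
 * Per-channel completion: advance to the next scatter-gather chunk if
 * one remains (re-arming either the hardware chain or CCR_CEN by hand),
 * otherwise stop the channel and defer completion to the tasklet.
 */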
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

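/*
 * Main interrupt handler. i.MX1 signals errors on a separate IRQ line
 * (wired up in probe); on i.MX21/27 errors share this line, so the
 * error handler is run here first.
 */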
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

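/*
 * Program the hardware for a descriptor and kick it off. Interleaved
 * transfers additionally claim one of the two global 2D slots; -EBUSY
 * is returned when both slots are in use with different geometry.
 */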
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			    ((imxdma->slots_2d[i].xsr != d->x) ||
			     (imxdma->slots_2d[i].ysr != d->y) ||
			     (imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

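/*
 * Completion tasklet: mark the finished descriptor complete (cyclic
 * descriptors stay on ld_active), release a claimed 2D slot, start the
 * next queued descriptor and invoke the client callback.
 */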
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only non-cyclic descriptors are marked as complete.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);
}

static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}

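/*
 * Precompute the channel control words for both directions. The
 * IMX_DMA_MEMSIZE_x and IMX_DMA_TYPE_x values encode the CCR source
 * fields (SSIZ/SMOD); shifting them left by 2 yields the matching
 * destination fields (DSIZ/DMOD).
 */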
static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

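/*
 * tx_submit only moves the descriptor from ld_free to ld_queue and
 * assigns a cookie; the transfer is actually started from
 * imxdma_issue_pending() or from the tasklet once the channel idles.
 */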
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

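/*
 * Cyclic transfers are emulated with a scatterlist of one entry per
 * period whose closing entry links back to the first, so
 * imxdma_sg_next() wraps around forever; desc->len is set to
 * IMX_DMA_LENGTH_LOOP so its residue bookkeeping is skipped.
 */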
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		"   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long) xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
				   imxdma_filter_fn, &fdata);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	const struct of_device_id *of_id;
	int ret, i;
	int irq, irq_err;

	of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					       dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
				imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}

static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}

static int imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.id_table	= imx_dma_devtype,
	.remove		= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");