/*
 * drivers/dma/fsl-edma.c
 *
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 *
 * Driver for the Freescale eDMA engine with flexible channel multiplexing
 * capability for DMA request sources. The eDMA block can be found on some
 * Vybrid and Layerscape SoCs.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

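/*
 * Per-channel transfer control descriptor (TCD) registers: channel n's
 * TCD starts at offset 0x1000 + 32 * n and has the same layout as
 * struct fsl_edma_hw_tcd below.
 */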
#define EDMA_TCD_SADDR(x)	(0x1000 + 32 * (x))
#define EDMA_TCD_SOFF(x)	(0x1004 + 32 * (x))
#define EDMA_TCD_ATTR(x)	(0x1006 + 32 * (x))
#define EDMA_TCD_NBYTES(x)	(0x1008 + 32 * (x))
#define EDMA_TCD_SLAST(x)	(0x100C + 32 * (x))
#define EDMA_TCD_DADDR(x)	(0x1010 + 32 * (x))
#define EDMA_TCD_DOFF(x)	(0x1014 + 32 * (x))
#define EDMA_TCD_CITER_ELINK(x)	(0x1016 + 32 * (x))
#define EDMA_TCD_CITER(x)	(0x1016 + 32 * (x))
#define EDMA_TCD_DLAST_SGA(x)	(0x1018 + 32 * (x))
#define EDMA_TCD_CSR(x)		(0x101C + 32 * (x))
#define EDMA_TCD_BITER_ELINK(x)	(0x101E + 32 * (x))
#define EDMA_TCD_BITER(x)	(0x101E + 32 * (x))

#define EDMA_CR_EDBG		BIT(1)
#define EDMA_CR_ERCA		BIT(2)
#define EDMA_CR_ERGA		BIT(3)
#define EDMA_CR_HOE		BIT(4)
#define EDMA_CR_HALT		BIT(5)
#define EDMA_CR_CLM		BIT(6)
#define EDMA_CR_EMLM		BIT(7)
#define EDMA_CR_ECX		BIT(16)
#define EDMA_CR_CX		BIT(17)

#define EDMA_SEEI_SEEI(x)	((x) & 0x1F)
#define EDMA_CEEI_CEEI(x)	((x) & 0x1F)
#define EDMA_CINT_CINT(x)	((x) & 0x1F)
#define EDMA_CERR_CERR(x)	((x) & 0x1F)

#define EDMA_TCD_ATTR_DSIZE(x)		(((x) & 0x0007))
#define EDMA_TCD_ATTR_DMOD(x)		(((x) & 0x001F) << 3)
#define EDMA_TCD_ATTR_SSIZE(x)		(((x) & 0x0007) << 8)
#define EDMA_TCD_ATTR_SMOD(x)		(((x) & 0x001F) << 11)
#define EDMA_TCD_ATTR_SSIZE_8BIT	(0x0000)
#define EDMA_TCD_ATTR_SSIZE_16BIT	(0x0100)
#define EDMA_TCD_ATTR_SSIZE_32BIT	(0x0200)
#define EDMA_TCD_ATTR_SSIZE_64BIT	(0x0300)
#define EDMA_TCD_ATTR_SSIZE_32BYTE	(0x0500)
#define EDMA_TCD_ATTR_DSIZE_8BIT	(0x0000)
#define EDMA_TCD_ATTR_DSIZE_16BIT	(0x0001)
#define EDMA_TCD_ATTR_DSIZE_32BIT	(0x0002)
#define EDMA_TCD_ATTR_DSIZE_64BIT	(0x0003)
#define EDMA_TCD_ATTR_DSIZE_32BYTE	(0x0005)

#define EDMA_TCD_SOFF_SOFF(x)		(x)
#define EDMA_TCD_NBYTES_NBYTES(x)	(x)
#define EDMA_TCD_SLAST_SLAST(x)		(x)
#define EDMA_TCD_DADDR_DADDR(x)		(x)
#define EDMA_TCD_CITER_CITER(x)		((x) & 0x7FFF)
#define EDMA_TCD_DOFF_DOFF(x)		(x)
#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x)	(x)
#define EDMA_TCD_BITER_BITER(x)		((x) & 0x7FFF)

#define EDMA_TCD_CSR_START		BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
#define EDMA_TCD_CSR_INT_HALF		BIT(2)
#define EDMA_TCD_CSR_D_REQ		BIT(3)
#define EDMA_TCD_CSR_E_SG		BIT(4)
#define EDMA_TCD_CSR_E_LINK		BIT(5)
#define EDMA_TCD_CSR_ACTIVE		BIT(6)
#define EDMA_TCD_CSR_DONE		BIT(7)

#define EDMAMUX_CHCFG_DIS		0x0
#define EDMAMUX_CHCFG_ENBL		0x80
#define EDMAMUX_CHCFG_SOURCE(n)		((n) & 0x3F)

#define DMAMUX_NR	2

#define FSL_EDMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

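/*
 * In-memory image of one hardware TCD; the fields are stored in the
 * eDMA module's endianness so the engine can load them directly for
 * hardware scatter/gather (see fill_tcd_params() below).
 */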
struct fsl_edma_hw_tcd {
	u32	saddr;
	u16	soff;
	u16	attr;
	u32	nbytes;
	u32	slast;
	u32	daddr;
	u16	doff;
	u16	citer;
	u32	dlast_sga;
	u16	csr;
	u16	biter;
};

struct fsl_edma_sw_tcd {
	dma_addr_t			ptcd;
	struct fsl_edma_hw_tcd		*vtcd;
};

struct fsl_edma_slave_config {
	enum dma_transfer_direction	dir;
	enum dma_slave_buswidth		addr_width;
	u32				dev_addr;
	u32				burst;
	u32				attr;
};

struct fsl_edma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;
	struct fsl_edma_engine		*edma;
	struct fsl_edma_desc		*edesc;
	struct fsl_edma_slave_config	fsc;
	struct dma_pool			*tcd_pool;
};

struct fsl_edma_desc {
	struct virt_dma_desc		vdesc;
	struct fsl_edma_chan		*echan;
	bool				iscyclic;
	unsigned int			n_tcds;
	struct fsl_edma_sw_tcd		tcd[];
};

struct fsl_edma_engine {
	struct dma_device	dma_dev;
	void __iomem		*membase;
	void __iomem		*muxbase[DMAMUX_NR];
	struct clk		*muxclk[DMAMUX_NR];
	struct mutex		fsl_edma_mutex;
	u32			n_chans;
	int			txirq;
	int			errirq;
	bool			big_endian;
	struct fsl_edma_chan	chans[];
};

/*
 * Register read/write helpers for big- or little-endian access;
 * the eDMA controller's endianness is independent of the CPU core's.
 */

static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
{
	if (edma->big_endian)
		return ioread16be(addr);
	else
		return ioread16(addr);
}

static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
	if (edma->big_endian)
		return ioread32be(addr);
	else
		return ioread32(addr);
}

/* byte-wide registers are endian-neutral, so no swapping is needed */
static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
{
	iowrite8(val, addr);
}

static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
{
	if (edma->big_endian)
		iowrite16be(val, addr);
	else
		iowrite16(val, addr);
}

static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
{
	if (edma->big_endian)
		iowrite32be(val, addr);
	else
		iowrite32(val, addr);
}

static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_edma_chan, vchan.chan);
}

static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_edma_desc, vdesc);
}

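/*
 * SERQ (set enable request) and SEEI (set enable error interrupt) are
 * byte-wide "set" registers: writing a channel number sets that
 * channel's bit in ERQ/EEI without a read-modify-write of the full
 * bitmap; CERQ/CEEI are the matching "clear" registers.
 */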
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
	edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
}

static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
	edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
}

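/*
 * Route DMA request source 'slot' through the DMAMUX to this channel.
 * Each of the DMAMUX_NR muxes serves n_chans / DMAMUX_NR channels via
 * one byte-wide configuration register per channel.
 */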
static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned chans_per_mux, ch_off;

	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];

	if (enable)
		edma_writeb(fsl_chan->edma,
			EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
			muxaddr + ch_off);
	else
		edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}

static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case 1:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case 2:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case 4:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case 8:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}

static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool,
				fsl_desc->tcd[i].vtcd,
				fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}

static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	LIST_HEAD(head);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->edesc = NULL;
		vchan_get_all_descriptors(&fsl_chan->vchan, &head);
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
		return 0;

	case DMA_SLAVE_CONFIG:
		fsl_chan->fsc.dir = cfg->direction;
		if (cfg->direction == DMA_DEV_TO_MEM) {
			fsl_chan->fsc.dev_addr = cfg->src_addr;
			fsl_chan->fsc.addr_width = cfg->src_addr_width;
			fsl_chan->fsc.burst = cfg->src_maxburst;
			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			fsl_chan->fsc.dev_addr = cfg->dst_addr;
			fsl_chan->fsc.addr_width = cfg->dst_addr_width;
			fsl_chan->fsc.burst = cfg->dst_maxburst;
			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
		} else {
			return -EINVAL;
		}
		return 0;

	case DMA_PAUSE:
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		if (fsl_chan->edesc) {
			fsl_edma_disable_request(fsl_chan);
			fsl_chan->status = DMA_PAUSED;
		}
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		return 0;

	case DMA_RESUME:
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		if (fsl_chan->edesc) {
			fsl_edma_enable_request(fsl_chan);
			fsl_chan->status = DMA_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		return 0;

	default:
		return -ENXIO;
	}
}

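/*
 * Residue = total bytes described by the descriptor minus what has
 * already completed; the in-flight TCD is found by checking which
 * TCD's address window contains the channel's current SADDR/DADDR.
 */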
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
		struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = fsl_chan->fsc.dir;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size described by this descriptor */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
			* edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
	else
		cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));

	/* figure out which TCDs have finished and compute the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
			* edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = edma_readl(fsl_chan->edma,
					&(edesc->tcd[i].vtcd->saddr));
		else
			dma_addr = edma_readl(fsl_chan->edma,
					&(edesc->tcd[i].vtcd->daddr));

		len -= size;
		if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}

static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
		u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
		u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
		u16 csr)
{
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	/*
	 * The TCD parameters were already put into the eDMA module's byte
	 * order by fill_tcd_params(), so write them out with the plain
	 * CPU-endian accessors here.
	 */
	writew(0, addr + EDMA_TCD_CSR(ch));
	writel(src, addr + EDMA_TCD_SADDR(ch));
	writel(dst, addr + EDMA_TCD_DADDR(ch));
	writew(attr, addr + EDMA_TCD_ATTR(ch));
	writew(soff, addr + EDMA_TCD_SOFF(ch));
	writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
	writel(slast, addr + EDMA_TCD_SLAST(ch));
	writew(citer, addr + EDMA_TCD_CITER(ch));
	writew(biter, addr + EDMA_TCD_BITER(ch));
	writew(doff, addr + EDMA_TCD_DOFF(ch));
	writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
	writew(csr, addr + EDMA_TCD_CSR(ch));
}

static void fill_tcd_params(struct fsl_edma_engine *edma,
		struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware scatter/gather requires the TCDs to be stored in
	 * memory in the same endianness as the eDMA module itself, so that
	 * the engine can load them without software intervention.
	 */
	edma_writel(edma, src, &(tcd->saddr));
	edma_writel(edma, dst, &(tcd->daddr));
	edma_writew(edma, attr, &(tcd->attr));
	edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
	edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
	edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
	edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
	edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
	edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
	edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));
	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	edma_writew(edma, csr, &(tcd->csr));
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
		int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
				GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
				fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

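/*
 * Cyclic transfer: one TCD per period, linked into a ring through
 * dlast_sga with E_SG set; every TCD raises the major-loop interrupt
 * so the client gets a callback per period.
 */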
static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(fsl_chan->fsc.dir))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;

	dma_buf_next = dma_addr;
	nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->fsc.dev_addr;
			soff = fsl_chan->fsc.addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->fsc.dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->fsc.addr_width;
		}

		fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
				dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
				iter, iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

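/*
 * Slave scatter/gather: TCDs are chained with E_SG; the last TCD
 * clears the hardware request (D_REQ) and raises the major-loop
 * interrupt to signal completion.
 */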
static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(fsl_chan->fsc.dir))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
	for_each_sg(sgl, sg, sg_len, i) {
		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->fsc.dev_addr;
			soff = fsl_chan->fsc.addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->fsc.dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->fsc.addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
					src_addr, dst_addr, fsl_chan->fsc.attr,
					soff, nbytes, 0, iter, iter, doff, last_sg,
					false, false, true);
		} else {
			last_sg = 0;
			fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
					src_addr, dst_addr, fsl_chan->fsc.attr,
					soff, nbytes, 0, iter, iter, doff, last_sg,
					true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

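/* Load the first TCD of the next queued descriptor and start it. */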
static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct fsl_edma_hw_tcd *tcd;
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	tcd = fsl_chan->edesc->tcd[0].vtcd;
	fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
			tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
			tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
}

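/*
 * Transfer-complete interrupt: INTR has one bit per channel; writing
 * the channel number to the byte-wide CINT register acknowledges it.
 */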
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int intr, ch;
	void __iomem *base_addr;
	struct fsl_edma_chan *fsl_chan;

	base_addr = fsl_edma->membase;

	intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
	if (!intr)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & (0x1 << ch)) {
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
				base_addr + EDMA_CINT);

			fsl_chan = &fsl_edma->chans[ch];

			spin_lock(&fsl_chan->vchan.lock);
			if (!fsl_chan->edesc->iscyclic) {
				list_del(&fsl_chan->edesc->vdesc.node);
				vchan_cookie_complete(&fsl_chan->edesc->vdesc);
				fsl_chan->edesc = NULL;
				fsl_chan->status = DMA_COMPLETE;
			} else {
				vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
			}

			if (!fsl_chan->edesc)
				fsl_edma_xfer_desc(fsl_chan);

			spin_unlock(&fsl_chan->vchan.lock);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int err, ch;

	err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (err & (0x1 << ch)) {
			fsl_edma_disable_request(&fsl_edma->chans[ch]);
			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
				fsl_edma->membase + EDMA_CERR);
			fsl_edma->chans[ch].status = DMA_ERROR;
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
{
	if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
		return IRQ_HANDLED;

	return fsl_edma_err_handler(irq, dev_id);
}

static void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}

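/*
 * Translate a two-cell DT DMA specifier into a channel: cell 0 selects
 * the DMAMUX instance, cell 1 the request source to route through it.
 * A client node would look roughly like this (sketch only; the label
 * and source number are illustrative, not taken from a real board):
 *
 *	dmas = <&edma0 0 24>;
 *	dma-names = "rx";
 */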
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan, *_chan;
	unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;

	if (dma_spec->args_count != 2)
		return NULL;

	mutex_lock(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
		if (chan->client_count)
			continue;
		if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			if (chan) {
				chan->device->privatecnt++;
				fsl_edma_chan_mux(to_fsl_edma_chan(chan),
					dma_spec->args[1], true);
				mutex_unlock(&fsl_edma->fsl_edma_mutex);
				return chan;
			}
		}
	}
	mutex_unlock(&fsl_edma->fsl_edma_mutex);
	return NULL;
}

static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}

static void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
}

static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
		struct dma_slave_caps *caps)
{
	caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
	caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;

	return 0;
}

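/*
 * Some SoCs wire the transfer-complete and error interrupts to a
 * single combined line; in that case one handler services both.
 */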
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	int ret;

	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
	if (fsl_edma->txirq < 0) {
		dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
		return fsl_edma->txirq;
	}

	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
	if (fsl_edma->errirq < 0) {
		dev_err(&pdev->dev, "Can't get edma-err irq.\n");
		return fsl_edma->errirq;
	}

	if (fsl_edma->txirq == fsl_edma->errirq) {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
			return ret;
		}
	} else {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
				fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
			return ret;
		}
	}

	return 0;
}

static int fsl_edma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma;
	struct fsl_edma_chan *fsl_chan;
	struct resource *res;
	int len, ret, i;
	u32 chans;

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
	fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_edma)
		return -ENOMEM;

	fsl_edma->n_chans = chans;
	mutex_init(&fsl_edma->fsl_edma_mutex);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_edma->membase))
		return PTR_ERR(fsl_edma->membase);

	for (i = 0; i < DMAMUX_NR; i++) {
		char clkname[32];

		res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
		fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(fsl_edma->muxbase[i]))
			return PTR_ERR(fsl_edma->muxbase[i]);

		sprintf(clkname, "dmamux%d", i);
		fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
		if (IS_ERR(fsl_edma->muxclk[i])) {
			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
			return PTR_ERR(fsl_edma->muxclk[i]);
		}

		ret = clk_prepare_enable(fsl_edma->muxclk[i]);
		if (ret) {
			dev_err(&pdev->dev, "Can't enable DMAMUX block clock.\n");
			return ret;
		}
	}

	ret = fsl_edma_irq_init(pdev, fsl_edma);
	if (ret)
		return ret;

	fsl_edma->big_endian = of_property_read_bool(np, "big-endian");

	INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
	for (i = 0; i < fsl_edma->n_chans; i++) {
		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];

		fsl_chan->edma = fsl_edma;

		fsl_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);

		edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
		fsl_edma_chan_mux(fsl_chan, 0, false);
	}

	dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);

	fsl_edma->dma_dev.dev = &pdev->dev;
	fsl_edma->dma_dev.device_alloc_chan_resources
		= fsl_edma_alloc_chan_resources;
	fsl_edma->dma_dev.device_free_chan_resources
		= fsl_edma_free_chan_resources;
	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
	fsl_edma->dma_dev.device_control = fsl_edma_control;
	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
	fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;

	platform_set_drvdata(pdev, fsl_edma);

	ret = dma_async_device_register(&fsl_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
		return ret;
	}

	ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
	if (ret) {
		dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
		dma_async_device_unregister(&fsl_edma->dma_dev);
		return ret;
	}

	/* enable round-robin group and channel arbitration */
	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);

	return 0;
}

static int fsl_edma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_edma->dma_dev);

	for (i = 0; i < DMAMUX_NR; i++)
		clk_disable_unprepare(fsl_edma->muxclk[i]);

	return 0;
}

static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);

static struct platform_driver fsl_edma_driver = {
	.driver		= {
		.name	= "fsl-edma",
		.owner	= THIS_MODULE,
		.of_match_table	= fsl_edma_dt_ids,
	},
	.probe		= fsl_edma_probe,
	.remove		= fsl_edma_remove,
};

static int __init fsl_edma_init(void)
{
	return platform_driver_register(&fsl_edma_driver);
}
subsys_initcall(fsl_edma_init);

static void __exit fsl_edma_exit(void)
{
	platform_driver_unregister(&fsl_edma_driver);
}
module_exit(fsl_edma_exit);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");