/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD		0x020c

#define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
#define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	BIT(9)	/* The end of Receive */
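
/*
 * Each peripheral request line n has a DRCMR mapping register: requestors
 * 0-63 sit in a bank at offset 0x0100, requestors 64 and up in a second
 * bank at 0x1100, four bytes per requestor.
 */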
#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)		/* Stop (read / write) */

#define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH

struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);
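
/*
 * Hardware descriptors are chained through the ddadr field: the last
 * descriptor of a chain carries DDADR_STOP, while cyclic transfers instead
 * link the tail back to the first descriptor.
 */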

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
						 * is in cyclic mode */

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock; /* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx)		\
	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh)		\
	container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan)		\
	container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev)		\
	container_of(dmadev, struct mmp_pdma_device, device)

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (!(dint & BIT(phy->idx)))
		return -EAGAIN;

	/* clear irq */
	dcsr = readl(phy->base + reg);
	writel(dcsr, phy->base + reg);
	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");

	return 0;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) != 0)
		return IRQ_NONE;

	tasklet_schedule(&phy->vchan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		/* only handle interrupts belonging to pdma driver */
		if (i >= pdev->dma_channels)
			break;
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

/* look up a free phy channel, in descending priority order */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */
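	/* e.g. channel 21: prio = (21 & 0xf) >> 2 = 1 */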

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still in running, irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each desc has submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  sizeof(struct mmp_pdma_desc_sw),
					  __alignof__(struct mmp_pdma_desc_sw),
					  0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->dir = dir;
	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
			 dma_addr_t buf_addr, size_t len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	dma_addr_t dma_src, dma_dst;

	if (!dchan || !len || !period_len)
		return NULL;

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0)
		return NULL;

	if (period_len > PDMA_MAX_DESC_BYTES)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_src = buf_addr;
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_dst = buf_addr;
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
				  (DCMD_LENGTH & period_len));
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= period_len;

		if (chan->dir == DMA_MEM_TO_DEV)
			dma_src += period_len;
		else
			dma_dst += period_len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the cyclic link */
	new->desc.ddadr = first->async_tx.phys;
	chan->cyclic_first = first;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static int mmp_pdma_config(struct dma_chan *dchan,
			   struct dma_slave_config *cfg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	if (cfg->direction == DMA_DEV_TO_MEM) {
		chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
		addr = cfg->src_addr;
	} else if (cfg->direction == DMA_MEM_TO_DEV) {
		chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
		addr = cfg->dst_addr;
	}

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		chan->dcmd |= DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		chan->dcmd |= DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		chan->dcmd |= DCMD_WIDTH4;

	if (maxburst == 8)
		chan->dcmd |= DCMD_BURST8;
	else if (maxburst == 16)
		chan->dcmd |= DCMD_BURST16;
	else if (maxburst == 32)
		chan->dcmd |= DCMD_BURST32;

	chan->dir = cfg->direction;
	chan->dev_addr = addr;
	/* FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (cfg->slave_id)
		chan->drcmr = cfg->slave_id;

	return 0;
}
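
/*
 * A minimal client-side sketch (device values are hypothetical): configuring
 * a device-to-memory channel before preparing slave transfers:
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = fifo_phys,	// hypothetical device FIFO address
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst   = 32,		// maps to DCMD_BURST32 above
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */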

static int mmp_pdma_terminate_all(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	if (!dchan)
		return -EINVAL;

	disable_chan(chan->phy);
	mmp_pdma_free_phy(chan);
	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
	chan->idle = true;

	return 0;
}

static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
				     dma_cookie_t cookie)
{
	struct mmp_pdma_desc_sw *sw;
	u32 curr, residue = 0;
	bool passed = false;
	bool cyclic = chan->cyclic_first != NULL;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	if (chan->dir == DMA_DEV_TO_MEM)
		curr = readl(chan->phy->base + DTADR(chan->phy->idx));
	else
		curr = readl(chan->phy->base + DSADR(chan->phy->idx));

	list_for_each_entry(sw, &chan->chain_running, node) {
		u32 start, end, len;

		if (chan->dir == DMA_DEV_TO_MEM)
			start = sw->desc.dtadr;
		else
			start = sw->desc.dsadr;

		len = sw->desc.dcmd & DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' is latched once we find the descriptor whose
		 * boundaries contain the curr pointer. All descriptors that
		 * occur in the list _after_ that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */
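		/*
		 * For example, with three 100-byte descriptors and curr
		 * pointing 30 bytes into the second one, residue becomes
		 * (end2 - curr) + len3 = 70 + 100 = 170 bytes.
		 */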
		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}

		/*
		 * Descriptors that have the ENDIRQEN bit set mark the end of a
		 * transaction chain, and the cookie assigned with it has been
		 * returned previously from mmp_pdma_tx_submit().
		 *
		 * In case we have multiple transactions in the running chain,
		 * and the cookie does not match the one the user asked us
		 * about, reset the state variables and start over.
		 *
		 * This logic does not apply to cyclic transactions, where all
		 * descriptors have the ENDIRQEN bit set, and for which we
		 * can't have multiple transactions on one channel anyway.
		 */
		if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
			continue;

		if (sw->async_tx.cookie == cookie) {
			return residue;
		} else {
			residue = 0;
			passed = false;
		}
	}

	/* We should only get here in case of cyclic transactions */
	return residue;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(ret != DMA_ERROR))
		dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));

	return ret;
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Run the callbacks
 * Start any pending transactions
 */
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	if (chan->cyclic_first) {
		dma_async_tx_callback cb = NULL;
		void *cb_data = NULL;

		spin_lock_irqsave(&chan->desc_lock, flags);
		desc = chan->cyclic_first;
		cb = desc->async_tx.callback;
		cb_data = desc->async_tx.callback_param;
		spin_unlock_irqrestore(&chan->desc_lock, flags);

		if (cb)
			cb(cb_data);

		return;
	}

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_move(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;
			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
				       IRQF_SHARED, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static const struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->device);
	if (!chan)
		return NULL;

	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];

	return chan;
}
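
/*
 * A sketch of how a client node might reference this controller in the
 * device tree (node names and the requestor number are illustrative only);
 * the first specifier cell becomes the channel's DRCMR number above:
 *
 *	dmas = <&pdma0 21>;
 *	dma-names = "rx";
 */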

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channel */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       IRQF_SHARED, "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_config = mmp_pdma_config;
	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
	pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
	pdev->device.src_addr_widths = widths;
	pdev->device.dst_addr_widths = widths;
	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver = {
		.name = "mmp-pdma",
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table = mmp_pdma_id_table,
	.probe = mmp_pdma_probe,
	.remove = mmp_pdma_remove,
};

bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
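
/*
 * A minimal sketch of requesting a channel through this filter from a
 * non-DT client (the requestor number 21 is illustrative only):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int drcmr = 21;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
 */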

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");