dmaengine: PL08x: get src/dst addr direct from dma_slave_config struct
drivers/dma/amba-pl08x.c
/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
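 * (For example, a peripheral with a 16-word FIFO would use a source
 * burst of 8 words and a destination burst of 16 words - a hypothetical
 * illustration of the rule above, not a figure from the TRMs.)
 *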
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/hardware/pl080.h>

#include "dmaengine.h"

#define DRIVER_NAME	"pl08xdmac"

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 * that need to be checked for permission before use and some registers are
 * missing
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
	bool nomadik;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @base: memory base address for this physical channel
 * @lock: a lock to use when altering an instance of this struct
 * @signal: the physical signal (aka channel) serving this physical channel
 * right now
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to the
 * secure world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	spinlock_t lock;
	int signal;
	struct pl08x_dma_chan *serving;
	bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @tx: async tx descriptor
 * @node: node for txd list for channels
 * @dsg_list: list of children sg's
 * @direction: direction of transfer
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 */
struct pl08x_txd {
	struct dma_async_tx_descriptor tx;
	struct list_head node;
	struct list_head dsg_list;
	enum dma_transfer_direction direction;
	dma_addr_t llis_bus;
	struct pl08x_lli *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
};

/**
 * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @chan: wrapped abstract channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @phychan_hold: if non-zero, hold on to the physical channel even if we
 * have no pending entries
 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
 * @name: name of channel
 * @cd: channel platform data
 * @cfg: slave configuration, including the RX/TX addresses used at runtime
 * @src_cctl: default cctl value for DEV_TO_MEM transfers
 * @dst_cctl: default cctl value for MEM_TO_DEV transfers
 * @runtime_direction: current direction of this channel according to
 * runtime config
 * @pend_list: queued transactions pending on this channel
 * @at: active transaction on this channel
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @device_fc: Flow Controller Settings for ccfg register.  Only valid for
 * slave channels.  Fill with 'true' if the peripheral should be the flow
 * controller.  Direction will be selected at runtime.
 * @waiting: a TX descriptor on this channel which is waiting for a physical
 * channel to become available
 */
struct pl08x_dma_chan {
	struct dma_chan chan;
	struct pl08x_phy_chan *phychan;
	int phychan_hold;
	struct tasklet_struct tasklet;
	const char *name;
	const struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	u32 src_cctl;
	u32 dst_cctl;
	enum dma_transfer_direction runtime_direction;
	struct list_head pend_list;
	struct pl08x_txd *at;
	spinlock_t lock;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	bool device_fc;
	struct pl08x_txd *waiting;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
};

/*
 * PL08X specific defines
 */

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
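/*
 * With the four 32-bit words of struct pl08x_lli this works out to
 * 0x2000 / 16 = 512 LLIs per transfer - a worked example of the
 * arithmetic above, assuming no padding in the struct.
 */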
#define PL08X_ALIGN		8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, tx);
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	u32 val;

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Mask out the other cctl bits before inspecting the source width */
	switch ((cctl & PL080_CONTROL_SWIDTH_MASK) >>
			PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}

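/*
 * Worked example: a cctl encoding a 32-bit source width and a transfer
 * size of 0x40 corresponds to 0x40 * 4 = 256 bytes remaining
 * (illustrative values only).
 */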
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *txdi;
		list_for_each_entry(txdi, &plchan->pend_list, node) {
			struct pl08x_sg *dsg;
			/* Sum each pending txd's sg list, not the active one */
			list_for_each_entry(dsg, &txdi->dsg_list, node)
				bytes += dsg->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer.  If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);

	/* Stop the channel and clear its interrupts */
	pl08x_terminate_phy_chan(pl08x, ch);

	/* Mark it as free */
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}

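/*
 * For example, pl08x_cctl_bits(cctl, 4, 2, 8) re-encodes cctl with a
 * 32-bit source width, a 16-bit destination width and a transfer size
 * of 8 source-width units, i.e. 32 bytes moved (illustrative values,
 * transfer size counts source-width transfers on the PL080).
 */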
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer.  The slave bus is the
 * chosen victim in case src & dst are not similarly aligned, i.e. if,
 * after aligning the master's address with the width requirements of the
 * transfer (by sending a few bytes of data byte by byte), the slave is
 * still not aligned, then its width will be reduced to BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}

/*
 * Fill in one LLI for a certain transfer descriptor and advance the counters
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
		sizeof(struct pl08x_lli);
	llis_va[num_llis].lli |= bd->lli_bus;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
		u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
	(*total_bytes) += len;
}

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
			      struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	struct pl08x_lli *llis_va;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	pl08x->pool_ctr++;

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
					 PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
					 PL080_CONTROL_DWIDTH_SHIFT);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the client driver,
		 *   with a memory address and zero length.  We pass this to
		 *   the controller and after the transfer it will receive the
		 *   last burst request from the peripheral and so the
		 *   transfer finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, as the DMA controller doesn't know when an LLI's
		 *   transfer is over, it can't load the next LLI.  So in this
		 *   case there has to be an assumption that only one LLI is
		 *   supported.  Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
				PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
					(bd.dstbus.addr % bd.dstbus.buswidth)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, 0);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
			break;
		}

		/*
		 * Send byte by byte for the following cases:
		 * - less than a bus width available
		 * - until the master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if ((mbus->addr) % (mbus->buswidth)) {
			early_bytes = mbus->buswidth - (mbus->addr) %
				(mbus->buswidth);
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

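		/*
		 * e.g. with mbus->buswidth == 4 and mbus->addr == 0x1003,
		 * one early byte brings the master bus up to the aligned
		 * address 0x1004 (illustrative values only).
		 */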
		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08zx)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
				&total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (sbus->addr % sbus->buswidth) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
				PL080_CONTROL_TRANSFER_SIZE_MASK;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * calculate the actual transfer size in
				 * relation to bus width and get a maximum
				 * remainder of the highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, tsize);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
						lli_len, cctl);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(&bd, &cctl, bd.remainder,
						num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded LLIs don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, (u32) MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI list. */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt. */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		dev_vdbg(&pl08x->adev->dev,
			 "%-3s %-9s  %-10s %-10s %-10s %s\n",
			 "lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
				);
		}
	}
#endif

	return num_llis;
}

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	/* Free the LLI */
	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	pl08x->pool_ctr--;

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->pend_list)) {
		list_for_each_entry_safe(txdi,
					 next, &plchan->pend_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan) {
		ch = plchan->phychan;
		goto got_channel;
	}

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave && pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan->cd);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;
	}

	plchan->phychan = ch;
	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		 ch->id,
		 ch->signal,
		 plchan->name);

got_channel:
	/* Assign the flow control signal to this channel */
	if (txd->direction == DMA_MEM_TO_DEV)
		txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else if (txd->direction == DMA_DEV_TO_MEM)
		txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	plchan->phychan_hold++;

	return 0;
}

static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
		pl08x->pd->put_signal(plchan->cd, plchan->phychan->signal);
		plchan->phychan->signal = -1;
	}
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
	struct pl08x_txd *txd = to_pl08x_txd(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&plchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	/* Put this onto the pending list */
	list_add_tail(&txd->node, &plchan->pend_list);

	/*
	 * If there was no physical channel available for this memcpy,
	 * stack the request up and indicate that the channel is waiting
	 * for a free physical channel.
	 */
	if (!plchan->slave && !plchan->phychan) {
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else {
		plchan->phychan_hold--;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}

/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}

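/*
 * For example, with src = PL08X_AHB1 and dst = PL08X_AHB1 | PL08X_AHB2,
 * the destination is routed to AHB2 while the source stays on AHB1, so
 * the two masters do not contend (illustrative masks only).
 */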
static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}

static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}

static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}

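/*
 * Since burst_sizes[] is ordered largest first, this picks the largest
 * hardware burst not exceeding maxburst: e.g. maxburst = 20 falls
 * through the 256..32 entries and selects PL080_BSIZE_16 (illustrative
 * value).
 */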
static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	enum dma_slave_buswidth addr_width;
	u32 width, burst, maxburst;
	u32 cctl = 0;

	if (!plchan->slave)
		return -EINVAL;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_MEM_TO_DEV) {
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_DEV_TO_MEM) {
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return -EINVAL;
	}

	width = pl08x_width(addr_width);
	if (width == ~0) {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return -EINVAL;
	}

	plchan->cfg = *config;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	plchan->device_fc = config->device_fc;

	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
			pl08x_select_bus(plchan->cd->periph_buses,
					 pl08x->mem_buses);
	} else {
		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
			pl08x_select_bus(pl08x->mem_buses,
					 plchan->cd->periph_buses);
	}

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=0x%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
		addr_width,
		maxburst,
		cctl);

	return 0;
}

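/*
 * A client typically reaches the function above through
 * dmaengine_slave_config().  A minimal sketch of such a call, where the
 * FIFO address and burst size are hypothetical values for illustration:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = 0x80120000,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst = 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */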
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, flags);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * memcpy transfers can be sorted out at submission time.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions.  Since there is no guarantee
		 * that this will ever be resolved, and the signal must be
		 * acquired AFTER acquiring the physical channel, we will let
		 * them be NACKed with -EBUSY here.  The drivers can retry
		 * the prep() call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			pl08x_free_txd(pl08x, txd);
			spin_unlock_irqrestore(&plchan->lock, flags);
			return -EBUSY;
		}
	} else
		/*
		 * Else we're all set, paused and ready to roll, status
		 * will switch to PL08X_CHAN_RUNNING when we call
		 * issue_pending().  If there is something running on the
		 * channel already we don't change its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return 0;
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
	unsigned long flags)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.flags = flags;
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);
		INIT_LIST_HEAD(&txd->dsg_list);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
				__func__);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	txd->direction = DMA_MEM_TO_MEM;
	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	struct scatterlist *sg;
	dma_addr_t slave_addr;
	int ret, tmp;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;

	if (direction == DMA_MEM_TO_DEV) {
		txd->cctl = plchan->dst_cctl;
		slave_addr = plchan->cfg.dst_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		txd->cctl = plchan->src_cctl;
		slave_addr = plchan->cfg.src_addr;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	if (plchan->device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
		if (!dsg) {
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else {
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
	}

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_terminate_phy_chan(pl08x, plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
			plchan->phychan_hold = 0;
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}

/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off.  That will save some
 * power.  Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}

static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->tx.chan->device->dev;
	struct pl08x_sg *dsg;

	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		else {
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		}
	}
	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
	}
}

static void pl08x_tasklet(unsigned long data)
{
	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	txd = plchan->at;
	plchan->at = NULL;

	if (txd) {
		/* Update last completed */
		dma_cookie_complete(&txd->tx);
	}

	/* If a new descriptor is queued, set it up; plchan->at is NULL here */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);

		pl08x_start_txd(plchan, next);
	} else if (plchan->phychan_hold) {
		/*
		 * This channel is still in use - we have a new txd being
		 * prepared and will soon be queued. Don't give up the
		 * physical channel.
		 */
	} else {
		struct pl08x_dma_chan *waiting = NULL;

		/*
		 * No more jobs, so free up the physical channel.
		 * Free any allocated signal on slave transfers too.
		 */
		release_phy_channel(plchan);
		plchan->state = PL08X_CHAN_IDLE;

		/*
		 * And NOW before anyone else can grab that freed-up
		 * physical channel, see if there is some memcpy pending
		 * that seriously needs to start because of being stacked
		 * up while we were choking the physical channels with data.
		 */
		list_for_each_entry(waiting, &pl08x->memcpy.channels,
				    chan.device_node) {
			if (waiting->state == PL08X_CHAN_WAITING &&
			    waiting->waiting != NULL) {
				int ret;

				/* This should REALLY not fail now */
				ret = prep_phy_channel(waiting,
						       waiting->waiting);
				BUG_ON(ret);
				waiting->phychan_hold--;
				waiting->state = PL08X_CHAN_RUNNING;
				waiting->waiting = NULL;
				pl08x_issue_pending(&waiting->chan);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	if (txd) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		/* Don't try to unmap buffers on slave channels */
		if (!plchan->slave)
			pl08x_unmap_buffers(txd);

		/* Free the descriptor */
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);

		/* Callback to signal completion */
		if (callback)
			callback(callback_param);
	}
}

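/*
 * Editor's illustrative sketch (not part of this driver): the callback
 * fired at the end of the tasklet above is whatever the client hung on
 * the descriptor before submission; my_dma_done and done are made-up
 * client-side names.
 *
 *	static void my_dma_done(void *param)
 *	{
 *		complete((struct completion *)param);
 *	}
 *
 *	tx->callback = my_dma_done;
 *	tx->callback_param = &done;
 *	dmaengine_submit(tx);
 */
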
static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if (((1 << i) & err) || ((1 << i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			/* Schedule tasklet on this channel */
			tasklet_schedule(&plchan->tasklet);
			mask |= (1 << i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}

static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	u32 cctl = pl08x_cctl(chan->cd->cctl);

	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
	chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
		pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
	chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
		pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
}

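/*
 * Editor's illustrative sketch (not part of this driver): the channel
 * data consumed above is supplied by board code; all values below are
 * hypothetical, for a single imaginary UART TX channel.
 *
 *	static struct pl08x_channel_data board_slave_channels[] = {
 *		{
 *			.bus_id = "uart0_tx",
 *			.min_signal = 0,
 *			.max_signal = 0,
 *			.cctl = 0,
 *			.addr = 0x80120000,
 *			.periph_buses = PL08X_AHB2,
 *		},
 *	};
 */
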
/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
	struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels;
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		dev_dbg(&pl08x->adev->dev,
			"initialize virtual channel \"%s\"\n",
			chan->name);

		chan->chan.device = dmadev;
		dma_cookie_init(&chan->chan);

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pend_list);
		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, chan.device_node) {
		list_del(&chan->chan.device_node);
		kfree(chan);
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s%s\n",
			   ch->id,
			   virt_chan ? virt_chan->name : "(none)",
			   ch->locked ? " LOCKED" : "");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}

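/*
 * Editor's note: a sample of what the debugfs file rendered above might
 * look like (channel names and states are made up):
 *
 *	PL08x physical channels:
 *	CHANNEL:	USER:
 *	--------	-----
 *	0		memcpy0
 *	1		(none)
 *
 *	PL08x virtual memcpy channels:
 *	CHANNEL:	STATE:
 *	--------	------
 *	memcpy0		running
 */
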
static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open = pl08x_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all channels */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
				   S_IFREG | S_IRUGO, NULL, pl08x,
				   &pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif

static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		ret = -EINVAL;
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only. If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs, align on 1-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
				      PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Attach the interrupt handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
				   GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate physical channel holders\n",
			__func__);
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);
		ch->signal = -1;

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only. Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->base + PL080_CH_CONFIG);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					      pl08x->pd->num_slave_channels, true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register memcpy as an async device - %d\n",
			 __func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register slave as an async device - %d\n",
			 __func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);

	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}

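/*
 * Editor's illustrative sketch (not part of this driver): the platform
 * data consumed by the probe routine above comes from board code. The
 * field names match what this file dereferences (slave_channels,
 * num_slave_channels, memcpy_channel, lli_buses, mem_buses); all the
 * values, and board_slave_channels itself, are hypothetical.
 *
 *	static struct pl08x_platform_data pl08x_pd = {
 *		.memcpy_channel = {
 *			.bus_id = "memcpy",
 *			.cctl = 0,
 *		},
 *		.slave_channels = board_slave_channels,
 *		.num_slave_channels = ARRAY_SIZE(board_slave_channels),
 *		.lli_buses = PL08X_AHB1,
 *		.mem_buses = PL08X_AHB1,
 *	};
 */
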
/* PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_nomadik = {
	.channels = 8,
	.dualmaster = true,
	.nomadik = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id = 0x00041080,
		.mask = 0x000fffff,
		.data = &vendor_pl080,
	},
	/* PL081 */
	{
		.id = 0x00041081,
		.mask = 0x000fffff,
		.data = &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id = 0x00280080,
		.mask = 0x00ffffff,
		.data = &vendor_nomadik,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl08x_ids);

static struct amba_driver pl08x_amba_driver = {
	.drv.name = DRIVER_NAME,
	.id_table = pl08x_ids,
	.probe = pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       " failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);