/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>

#include <plat/ste_dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
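
/*
 * Worked example (added for illustration, not part of the original
 * source): two channels share each 32-bit status/command register, two
 * bits per channel. For channel 5: D40_CHAN_POS(5) = 2 * (5 / 2) = 4,
 * so D40_CHAN_POS_MASK(5) = 0x3 << 4.
 */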

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY	100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up on trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0
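
/*
 * Note (added for clarity): allocated_src/allocated_dst in struct
 * d40_phy_res below hold either D40_ALLOC_FREE, D40_ALLOC_PHY, or a
 * bitmask of logical event lines, where D40_ALLOC_LOG_FREE (0) means
 * "logical mode, no event lines claimed yet".
 */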

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */

enum d40_events {
	D40_DEACTIVATE_EVENTLINE	= 0,
	D40_ACTIVATE_EVENTLINE		= 1,
	D40_SUSPEND_REQ_EVENTLINE	= 2,
	D40_ROUND_EVENTLINE		= 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
static u32 d40_backup_regs_v3[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)

static u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	dma_addr_t	dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	dma_addr;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	struct d40_desc	**alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool	   reserved;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending().
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 pending_queue;
	struct list_head		 active;
	struct list_head		 queue;
	struct list_head		 prepare_queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			runtime_addr;
	enum dma_transfer_direction	runtime_direction;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @initialized: true if the dma has been initialized.
 */
struct d40_base {
	spinlock_t			 interrupt_lock;
	spinlock_t			 execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	struct regulator		 *lcpa_regulator;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
	u32				  reg_val_backup[BACKUP_REGS_SZ];
	u32				  reg_val_backup_v3[BACKUP_REGS_SZ_V3];
	u32				 *reg_val_backup_chan;
	u16				  gcc_pwr_off_mask;
	bool				  initialized;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)		\
	d40_err(chan2dev(d40c), format, ## arg)

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}

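/*
 * Layout note (added for clarity): the pool above holds lli_len src
 * entries followed by lli_len dst entries, hence the lli_len * 2 * align
 * size. For physical channels the area is also DMA-mapped, since the
 * controller fetches the link items directly from memory.
 */
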
static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;
	int p;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

	/*
	 * Allocate both src and dst at the same time; therefore the half
	 * starts on 1, since 0 can't be used as zero is the end marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
						    D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
							D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;

}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{

	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

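/*
 * Summary (added for clarity): for a logical channel, the first link of a
 * job lives in LCPA space and any remaining links are chained through
 * LCLA entries allocated on the fly. Cyclic jobs additionally link the
 * last entry back to the first; see the linkback handling below.
 */
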
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		curr_lcla = d40_lcla_alloc_one(chan, desc);
		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		/*
		 * Cache maintenance is not needed if lcla is
		 * mapped in esram
		 */
		if (!use_esram_lcla) {
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
		}
		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}

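/*
 * Note (added for clarity): apart from the special *_1 encodings handled
 * above, a psize value of n simply encodes a burst of 2 << n elements.
 */
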
/*
 * The dma only supports transmitting packets up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}

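/*
 * Worked example (illustrative only, assuming a hypothetical maximum
 * segment size of 0xffff elements): with a 1-byte source width (min_w = 0)
 * and a 4-byte destination width (max_w = 2), seg_max becomes 0xfffc bytes
 * after rounding down to a 4-byte multiple, so a 200000-byte transfer
 * needs dmalen = DIV_ROUND_UP(200000, 0xfffc) = 4 link items.
 */
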
static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}


#ifdef CONFIG_PM
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
			 u32 *regaddr, int num, bool save)
{
	int i;

	for (i = 0; i < num; i++) {
		void __iomem *addr = baseaddr + regaddr[i];

		if (save)
			backup[i] = readl_relaxed(addr);
		else
			writel_relaxed(backup[i], addr);
	}
}

static void d40_save_restore_registers(struct d40_base *base, bool save)
{
	int i;

	/* Save/Restore channel specific registers */
	for (i = 0; i < base->num_phy_chans; i++) {
		void __iomem *addr;
		int idx;

		if (base->phy_res[i].reserved)
			continue;

		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
		idx = i * ARRAY_SIZE(d40_backup_regs_chan);

		dma40_backup(addr, &base->reg_val_backup_chan[idx],
			     d40_backup_regs_chan,
			     ARRAY_SIZE(d40_backup_regs_chan),
			     save);
	}

	/* Save/Restore global registers */
	dma40_backup(base->virtbase, base->reg_val_backup,
		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
		     save);

	/* Save/Restore registers only existing on dma40 v3 and later */
	if (base->rev >= 3)
		dma40_backup(base->virtbase, base->reg_val_backup_v3,
			     d40_backup_regs_v3,
			     ARRAY_SIZE(d40_backup_regs_v3),
			     save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
}
#endif

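/*
 * Usage note (added for clarity): one table-driven helper serves both
 * directions; save == true snapshots the listed registers into the backup
 * arrays before power-off, save == false writes them back on resume.
 */
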
static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to suspend the chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	/* Release descriptors in prepare queue */
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
}

static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:

		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			  D40_EVENTLINE_POS(event);

		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				  D40_EVENTLINE_POS(event);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to stop the event_line chl %d (log: %d) "
				 "status %x\n", d40c->phy_chan->num,
				 d40c->log_num, status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
		/*
		 * The hardware sometimes doesn't register the enable when src and dst
		 * event lines are active on the same logical channel. Retry to ensure
		 * it does. Usually only one retry is sufficient.
		 */
		tries = 100;
		while (--tries) {
			writel((D40_ACTIVATE_EVENTLINE <<
				D40_EVENTLINE_POS(event)) |
			       ~D40_EVENTLINE_MASK(event), addr);

			if (readl(addr) & D40_EVENTLINE_MASK(event))
				break;
		}

		if (tries != 99)
			dev_dbg(chan2dev(d40c),
				"[%s] workaround enable S%cLNK (%d tries)\n",
				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
				100 - tries);

		WARN_ON(!tries);
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;

	}
}

static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);
	}

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
	}
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;


	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				 D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

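/*
 * Worked example (illustrative, assuming the 4-byte data width encoding
 * is 2): an element counter of 12 gives a residue of 12 * (1 << 2) = 48
 * bytes left to transfer.
 */
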
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}

static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	pm_runtime_get_sync(d40c->base->dev);
	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			pm_runtime_get_sync(d40c->base->dev);
		}

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload
		 * it, but only once the list is completed. We need to check
		 * for done because the interrupt will hit for every link, and
		 * not just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL)
			d40c->busy = false;
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);

}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);
	if (d40d == NULL)
		goto err;

	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else {
			if (!d40d->is_in_client_list) {
				d40_desc_remove(d40d);
				d40_lcla_free_all(d40c, d40d);
				list_add_tail(&d40d->node, &d40c->client);
				d40d->is_in_client_list = true;
			}
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

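/*
 * Summary (added for clarity): the handler below reads all the status
 * registers from the il[] table into regs[] and scans them with
 * find_next_bit() as one flat bitmap; row selects the register, idx the
 * bit within it, and il[row] maps to the right channel lookup and error
 * flag.
 */
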
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		chan_err(d40c, "Invalid TX channel address (%d)\n",
			 conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid RX channel address (%d)\n",
			 conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		chan_err(d40c, "Invalid dst\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		chan_err(d40c, "Invalid src\n");
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		chan_err(d40c, "No event line\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		chan_err(d40c, "Invalid event group\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy,
			       bool is_src, int log_event_line, bool is_log,
			       bool *first_user)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);

	*first_user = ((phy->allocated_src | phy->allocated_dst)
			== D40_ALLOC_FREE);

	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;

		if (d40c->dma_cfg.use_fixed_channel) {
			i = d40c->dma_cfg.phy_channel;

			if ((i != phy_num) && (i != phy_num + 1)) {
				dev_err(chan2dev(d40c),
					"invalid fixed phy channel %d\n", i);
				return -EINVAL;
			}

			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
					       is_log, first_phy_user))
				goto found_log;

			dev_err(chan2dev(d40c),
				"could not allocate fixed phy channel %d\n", i);
			return -EINVAL;
		}

		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;

}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{

	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	pm_runtime_get_sync(d40c->base->dev);
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "stop failed\n");
		goto out;
	}

	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);

	if (chan_is_logical(d40c))
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
	else
		d40c->base->lookup_phy_chans[phy->num] = NULL;

	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->busy = false;
	d40c->phy_chan = NULL;
	d40c->configured = false;
out:

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	return res;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		/*
		 * Channel status is packed two bits per channel; even
		 * numbered channels live in ACTIVE, odd ones in ACTIVO.
		 */
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		 D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);
	/* Don't let a failure on the src side be silently overwritten */
	if (ret < 0)
		return ret;

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}

static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);
	/* Don't let a failure on the src side be silently overwritten */
	if (ret < 0)
		return ret;

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}

static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}

static dma_addr_t
d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
{
	struct stedma40_platform_data *plat = chan->base->plat_data;
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	dma_addr_t addr = 0;

	if (chan->runtime_addr)
		return chan->runtime_addr;

	if (direction == DMA_DEV_TO_MEM)
		addr = plat->dev_rx[cfg->src_dev_type];
	else if (direction == DMA_MEM_TO_DEV)
		addr = plat->dev_tx[cfg->dst_dev_type];

	return addr;
}

static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_transfer_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr = 0;
	dma_addr_t dst_dev_addr = 0;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto err;

	/* A scatterlist chained back onto itself marks a cyclic transfer */
	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
		desc->cyclic = true;

	if (direction != DMA_TRANS_NONE) {
		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);

		if (direction == DMA_DEV_TO_MEM)
			src_dev_addr = dev_addr;
		else if (direction == DMA_MEM_TO_DEV)
			dst_dev_addr = dev_addr;
	}

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto err;
	}

	/*
	 * Add the descriptor to the prepare queue so that it can still
	 * be freed in terminate_all().
	 */
	list_add_tail(&desc->node, &chan->prepare_queue);

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;

err:
	if (desc)
		d40_desc_free(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}

bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);

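/*
 * Usage sketch (illustrative only, not part of this driver): a client
 * typically hands stedma40_filter() to dma_request_channel() together
 * with a channel configuration. The field values below are assumptions
 * for illustration; see <plat/ste_dma40.h> for the full layout.
 *
 *	dma_cap_mask_t mask;
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_PERIPH_TO_MEM,
 *	};
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */
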
static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
	u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = 1 << event;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}

static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);

	if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
}

/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);

	dma_cookie_init(chan);

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			chan_err(d40c, "Failed to configure memcpy channel\n");
			goto fail;
		}
	}

	err = d40_allocate_channel(d40c, &is_free_phy);
	if (err) {
		chan_err(d40c, "Failed to allocate channel\n");
		d40c->configured = false;
		goto fail;
	}

	pm_runtime_get_sync(d40c->base->dev);
	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, chan_is_logical(d40c));

	d40_set_prio_realtime(d40c);

	if (chan_is_logical(d40c)) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
		chan_is_logical(d40c) ? "logical" : "physical",
		d40c->phy_chan->num,
		d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}

static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = size;
	sg_dma_len(&src_sg) = size;

	/* DMA_MEM_TO_MEM: no device address is looked up for memcpy */
	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_MEM_TO_MEM,
			   dma_flags);
}

static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
		   struct scatterlist *dst_sg, unsigned int dst_nents,
		   struct scatterlist *src_sg, unsigned int src_nents,
		   unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return d40_prep_sg(chan, src_sg, dst_sg, src_nents,
			   DMA_MEM_TO_MEM, dma_flags);
}

static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_transfer_direction direction,
							 unsigned long dma_flags,
							 void *context)
{
	if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
		return NULL;

	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}

static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		      size_t buf_len, size_t period_len,
		      enum dma_transfer_direction direction, void *context)
{
	unsigned int periods = buf_len / period_len;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
	if (!sg)
		return NULL;

	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = dma_addr;
		sg_dma_len(&sg[i]) = period_len;
		dma_addr += period_len;
	}

	/*
	 * Turn the extra entry into a chain link back to the first one,
	 * making the scatterlist circular; d40_prep_sg() detects this
	 * and marks the descriptor as cyclic.
	 */
	sg[periods].offset = 0;
	sg_dma_len(&sg[periods]) = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);

	kfree(sg);

	return txd;
}

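/*
 * Usage sketch (illustrative, not part of this driver): a client such
 * as an audio driver reaches the function above through the
 * device_prep_dma_cyclic hook installed in d40_ops_init(). buf and
 * period below are hypothetical values; buf_len should be a whole
 * multiple of period_len, since the period count is computed by
 * integer division.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = chan->device->device_prep_dma_cyclic(chan, buf,
 *						   4 * period, period,
 *						   DMA_DEV_TO_MEM, NULL);
 */
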
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	enum dma_status ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		return -EINVAL;
	}

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, stedma40_residue(chan));

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;

	return ret;
}

static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);

	/* Busy means that queued jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

static void d40_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int ret;

	spin_lock_irqsave(&d40c->lock, flags);

	pm_runtime_get_sync(d40c->base->dev);
	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (ret)
		chan_err(d40c, "Failed to stop channel\n");

	d40_term_all(d40c);
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	if (d40c->busy) {
		/* Drop the PM reference taken when the channel went busy */
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}
	d40c->busy = false;

	spin_unlock_irqrestore(&d40c->lock, flags);
}

static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
			    struct stedma40_half_channel_info *info,
			    enum dma_slave_buswidth width,
			    u32 maxburst)
{
	enum stedma40_periph_data_width addr_width;
	int psize;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			width);
		return -EINVAL;
	}

	if (chan_is_logical(d40c)) {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}

	info->data_width = addr_width;
	info->psize = psize;
	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	return 0;
}

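/*
 * Worked example of the mapping above (the values follow directly from
 * the switch and if-ladders): a logical channel configured with
 * DMA_SLAVE_BUSWIDTH_4_BYTES and maxburst = 8 gets data_width =
 * STEDMA40_WORD_WIDTH and psize = STEDMA40_PSIZE_LOG_8; the same
 * request on a physical channel yields STEDMA40_PSIZE_PHY_8. Burst
 * sizes between the supported steps are rounded down.
 */
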
/* Runtime reconfiguration extension */
static int d40_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	dma_addr_t config_addr;
	u32 src_maxburst, dst_maxburst;
	int ret;

	src_addr_width = config->src_addr_width;
	src_maxburst = config->src_maxburst;
	dst_addr_width = config->dst_addr_width;
	dst_maxburst = config->dst_maxburst;

	if (config->direction == DMA_DEV_TO_MEM) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		/* Configure the memory side */
		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			dst_addr_width = src_addr_width;
		if (dst_maxburst == 0)
			dst_maxburst = src_maxburst;

	} else if (config->direction == DMA_MEM_TO_DEV) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		/* Configure the memory side */
		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			src_addr_width = dst_addr_width;
		if (src_maxburst == 0)
			src_maxburst = dst_maxburst;
	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return -EINVAL;
	}

	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
		dev_err(d40c->base->dev,
			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
			src_maxburst,
			src_addr_width,
			dst_maxburst,
			dst_addr_width);
		return -EINVAL;
	}

	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
					  src_addr_width,
					  src_maxburst);
	if (ret)
		return ret;

	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
					  dst_addr_width,
					  dst_maxburst);
	if (ret)
		return ret;

	/* Fill in register values */
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg,
			    &d40c->dst_def_cfg, false);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d/%d, "
		"maxburst %d/%d elements, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
		src_addr_width, dst_addr_width,
		src_maxburst, dst_maxburst);

	return 0;
}

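/*
 * Usage sketch (illustrative, values are assumptions): slave drivers
 * reach the function above through the generic dmaengine_slave_config()
 * wrapper, which issues DMA_SLAVE_CONFIG via d40_control() below.
 * fifo_phys_addr stands in for a real device FIFO address; the memory
 * side width/burst are copied from the device side when left undefined,
 * as done above.
 *
 *	struct dma_slave_config conf = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &conf);
 */
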
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		d40_terminate_all(chan);
		return 0;
	case DMA_PAUSE:
		return d40_pause(d40c);
	case DMA_RESUME:
		return d40_resume(d40c);
	case DMA_SLAVE_CONFIG:
		return d40_set_runtime_config(chan,
					      (struct dma_slave_config *) arg);
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}

/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->pending_queue);
		INIT_LIST_HEAD(&d40c->client);
		INIT_LIST_HEAD(&d40c->prepare_queue);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}

static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
		dev->device_prep_slave_sg = d40_prep_slave_sg;

	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
		dev->device_prep_dma_memcpy = d40_prep_memcpy;

		/*
		 * This controller can only access addresses at even
		 * 32-bit boundaries, i.e. 2^2 alignment.
		 */
		dev->copy_align = 2;
	}

	if (dma_has_cap(DMA_SG, dev->cap_mask))
		dev->device_prep_dma_sg = d40_prep_memcpy_sg;

	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;

	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
	dev->device_free_chan_resources = d40_free_chan_resources;
	dev->device_issue_pending = d40_issue_pending;
	dev->device_tx_status = d40_tx_status;
	dev->device_control = d40_control;
	dev->dev = base->dev;
}

static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

	d40_ops_init(base, &base->dma_slave);

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		d40_err(base->dev, "Failed to register slave channels\n");
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	d40_ops_init(base, &base->dma_memcpy);

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);

	d40_ops_init(base, &base->dma_both);
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}

/* Suspend resume functionality */
#ifdef CONFIG_PM
static int dma40_pm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;

	if (!pm_runtime_suspended(dev))
		return -EBUSY;

	if (base->lcpa_regulator)
		ret = regulator_disable(base->lcpa_regulator);
	return ret;
}

static int dma40_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	d40_save_restore_registers(base, true);

	/* Don't disable/enable clocks for v1 due to HW bugs */
	if (base->rev != 1)
		writel_relaxed(base->gcc_pwr_off_mask,
			       base->virtbase + D40_DREG_GCC);

	return 0;
}

static int dma40_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	if (base->initialized)
		d40_save_restore_registers(base, false);

	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
		       base->virtbase + D40_DREG_GCC);
	return 0;
}

static int dma40_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;

	if (base->lcpa_regulator)
		ret = regulator_enable(base->lcpa_regulator);

	return ret;
}

static const struct dev_pm_ops dma40_pm_ops = {
	.suspend = dma40_pm_suspend,
	.runtime_suspend = dma40_runtime_suspend,
	.runtime_resume = dma40_runtime_resume,
	.resume = dma40_resume,
};
#define DMA40_PM_OPS	(&dma40_pm_ops)
#else
#define DMA40_PM_OPS	NULL
#endif

/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;
	int gcc = D40_DREG_GCC_ENA;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
			base->phy_res[i].reserved = true;
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_SRC);
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_DST);

		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			base->phy_res[i].reserved = false;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		base->phy_res[chan].reserved = true;
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_SRC);
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_DST);
		num_phy_chans_avail--;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	/*
	 * To keep things simple, enable all clocks initially. The clocks
	 * will get managed later, after channel allocation. The clocks for
	 * the event lines on which reserved channels exist are not managed
	 * here.
	 */
	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
	base->gcc_pwr_off_mask = gcc;

	return num_phy_chans_avail;
}

static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 pid;
	u32 cid;
	u8 rev;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		d40_err(&pdev->dev, "No matching clock found\n");
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* This is just a regular AMBA PrimeCell ID actually */
	for (pid = 0, i = 0; i < 4; i++)
		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
			& 255) << (i * 8);
	for (cid = 0, i = 0; i < 4; i++)
		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
			& 255) << (i * 8);

	if (cid != AMBA_CID) {
		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
		goto failure;
	}
	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			AMBA_MANF_BITS(pid),
			AMBA_VENDOR_ST);
		goto failure;
	}
	/*
	 * HW revision:
	 * DB8500ed has revision 0
	 * ? has revision 1
	 * DB8500v1 has revision 2
	 * DB8500v2 has revision 3
	 */
	rev = AMBA_REV_BITS(pid);

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 rev, res->start);

	if (rev < 2) {
		d40_err(&pdev->dev, "hardware revision: %d is not supported",
			rev);
		goto failure;
	}

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		d40_err(&pdev->dev, "Out of memory\n");
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels is the number of
		 * event lines for all src devices and dst devices.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
					    sizeof(d40_backup_regs_chan),
					    GFP_KERNEL);
	if (!base->reg_val_backup_chan)
		goto failure;

	base->lcla_pool.alloc_map =
		kzalloc(num_phy_chans * sizeof(struct d40_desc *)
			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!IS_ERR(clk)) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->reg_val_backup_chan);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}

static void __init d40_hw_init(struct d40_base *base)
{

	static struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;

	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);

}

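/*
 * Note on the packing above (derived from the loop, not from
 * documentation): each channel takes two bits of configuration, the
 * channels are visited from the highest number down, and consecutive
 * channels alternate between the [0] and [1] accumulators. With an
 * even channel count this leaves the odd numbered channels in
 * prmseo[0]/activeo[0] (written to the O registers) and the even
 * numbered channels in index 1 (written to the E registers).
 */
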
static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit
	 * aligned. To fulfil this hardware requirement without wasting
	 * 256 KiB, we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculating how many pages that are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success with finding the
		 * correct alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}

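/*
 * Worked example of the allocation above (channel count assumed for
 * illustration): with 8 physical channels the pool needs 8 KiB, but
 * LCLA_ALIGNMENT demands a 256 KiB aligned start address. Allocating
 * one aligned block would waste most of 256 KiB, so up to
 * MAX_LCLA_ALLOC_ATTEMPTS small blocks are grabbed until one happens
 * to start on a 256 KiB boundary, and the losing candidates are freed
 * again afterwards.
 */
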
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}
	/* If lcla has to be located in ESRAM we don't need to allocate */
	if (base->plat_data->use_esram_lcla) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "lcla_esram");
		if (!res) {
			ret = -ENOENT;
			d40_err(&pdev->dev,
				"No \"lcla_esram\" memory resource\n");
			goto failure;
		}
		base->lcla_pool.base = ioremap(res->start,
					       resource_size(res));
		if (!base->lcla_pool.base) {
			ret = -ENOMEM;
			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
			goto failure;
		}
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	} else {
		ret = d40_lcla_allocate(base);
		if (ret) {
			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
			goto failure;
		}
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	pm_runtime_irq_safe(base->dev);
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_enable(base->dev);
	pm_runtime_resume(base->dev);

	if (base->plat_data->use_esram_lcla) {

		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
		if (IS_ERR(base->lcpa_regulator)) {
			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
			base->lcpa_regulator = NULL;
			goto failure;
		}

		ret = regulator_enable(base->lcpa_regulator);
		if (ret) {
			d40_err(&pdev->dev,
				"Failed to enable lcpa_regulator\n");
			regulator_put(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}
	}

	base->initialized = true;
	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
			iounmap(base->lcla_pool.base);
			base->lcla_pool.base = NULL;
		}

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		if (base->lcpa_regulator) {
			regulator_disable(base->lcpa_regulator);
			regulator_put(base->lcpa_regulator);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
		.pm = DMA40_PM_OPS,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);