/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"
#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY	100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up on trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

#define MAX(a, b) (((a) < (b)) ? (b) : (a))
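/*
 * Added note: physical channels are paired two-bits-per-channel into shared
 * registers, with even channels living in one register (e.g. D40_DREG_ACTIVE)
 * and odd channels in its sibling (D40_DREG_ACTIVO). D40_CHAN_POS() folds a
 * pair onto the same bit position: for channel 4, 2 * (4 / 2) = 4, and for
 * channel 5, 2 * (5 / 2) = 4 as well, so both use bits 5:4 of their
 * respective register (mask 0x3 << 4 = 0x30).
 */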
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
/**
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */
enum d40_events {
	D40_DEACTIVATE_EVENTLINE	= 0,
	D40_ACTIVATE_EVENTLINE		= 1,
	D40_SUSPEND_REQ_EVENTLINE	= 2,
	D40_ROUND_EVENTLINE		= 3
};
/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
/*
 * since 9540 and 8540 have the same HW revision
 * use v4a for 9540 or earlier
 * use v4b for 8540 or later
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 */
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
static u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};
static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,   0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false,  32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false,  64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false,  96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,    0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,   32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,   64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,   96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};
/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};
static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,     .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	dma_addr_t	dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used for among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 cyclic;
};
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		 *base;
	dma_addr_t	  dma_addr;
	void		 *base_unaligned;
	int		  pages;
	spinlock_t	  lock;
	struct d40_desc	**alloc_map;
};
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool	   reserved;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
	bool	   use_soft_lli;
};
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 pending_queue;
	struct list_head		 active;
	struct list_head		 done;
	struct list_head		 queue;
	struct list_head		 prepare_queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			runtime_addr;
	enum dma_transfer_direction	runtime_direction;
};
/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
	u32				*backup;
	u32				 backup_size;
	u32				 realtime_en;
	u32				 realtime_clear;
	u32				 high_prio_en;
	u32				 high_prio_clear;
	u32				 interrupt_en;
	u32				 interrupt_clear;
	struct d40_interrupt_lookup	*il;
	u32				 il_size;
	struct d40_reg_val		*init_reg;
	u32				 init_reg_size;
};
/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later versions.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @initialized: true if the dma has been initialized
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct device_dma_parameters	  dma_parms;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	struct regulator		 *lcpa_regulator;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
	u32				  reg_val_backup[BACKUP_REGS_SZ];
	u32				  reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A,
								BACKUP_REGS_SZ_V4B)];
	u32				 *reg_val_backup_chan;
	u16				  gcc_pwr_off_mask;
	bool				  initialized;
	struct d40_gen_dmac		  gen_dmac;
};
static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)		\
	d40_err(chan2dev(d40c), format, ## arg)
static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}
static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	/*
	 * Allocate both src and dst at the same time, therefore the half
	 * start on 1 since 0 can't be used since zero is used as end marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (!d40c->base->lcla_pool.alloc_map[idx]) {
			d40c->base->lcla_pool.alloc_map[idx] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
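/*
 * Added note: with D40_LCLA_LINK_PER_EVENT_GRP == 128, each physical channel
 * owns 128 alloc_map slots but hands out at most 63 link indices (1..63),
 * since every index covers a src/dst pair and index 0 is reserved as the
 * end-of-list marker. Index i of channel n lives at slot n * 128 + i.
 */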
static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
			d40c->base->lcla_pool.alloc_map[idx] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (desc == NULL)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}
static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}
static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}
static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->done);
}
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		/*
		 * If the channel is expected to use only soft_lli don't
		 * allocate a lcla. This is to avoid a HW issue that exists
		 * in some controller during a peripheral to memory transfer
		 * that uses linked lists.
		 */
		if (!(chan->phy_chan->use_soft_lli &&
		      chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
			curr_lcla = d40_lcla_alloc_one(chan, desc);

		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * link back to the invalid LCPA.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla allocation fails.
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		/*
		 * Cache maintenance is not needed if lcla is
		 * mapped in esram
		 */
		if (!use_esram_lcla) {
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
		}
		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}
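/*
 * Added note: for a cyclic (linkback) job with, say, three links, link 0 is
 * written both to LCPA and to the first LCLA slot, the remaining links go to
 * further LCLA slots, and the last link points back at first_lcla rather
 * than at LCPA, since the hardware cannot link back into LCPA space.
 */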
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}
/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}
static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}
static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
	if (list_empty(&d40c->done))
		return NULL;

	return list_first_entry(&d40c->done, struct d40_desc, node);
}
static int d40_psize_2_burst_size(bool is_log, int psize)
{
	int burst_size;

	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			burst_size = 1;
		else
			burst_size = 2 << psize;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			burst_size = 1;
		else
			burst_size = 2 << psize;
	}

	return burst_size;
}
/*
 * The dma only supports transmitting packages up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}
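/*
 * Added note (worked example, assuming STEDMA40_MAX_SEG_SIZE is 0xffff):
 * for a 1 MiB buffer with a byte-wide (min_w = 0) and a word-wide
 * (max_w = 2) half channel, seg_max = ALIGN(0xffff, 4) - 4 = 0xfffc bytes,
 * so the buffer needs 1048576 / 0xfffc rounded up = 17 llis.
 */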
static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}
#ifdef CONFIG_PM
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
			 u32 *regaddr, int num, bool save)
{
	int i;

	for (i = 0; i < num; i++) {
		void __iomem *addr = baseaddr + regaddr[i];

		if (save)
			backup[i] = readl_relaxed(addr);
		else
			writel_relaxed(backup[i], addr);
	}
}
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
	int i;

	/* Save/Restore channel specific registers */
	for (i = 0; i < base->num_phy_chans; i++) {
		void __iomem *addr;
		int idx;

		if (base->phy_res[i].reserved)
			continue;

		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
		idx = i * ARRAY_SIZE(d40_backup_regs_chan);

		dma40_backup(addr, &base->reg_val_backup_chan[idx],
			     d40_backup_regs_chan,
			     ARRAY_SIZE(d40_backup_regs_chan),
			     save);
	}

	/* Save/Restore global registers */
	dma40_backup(base->virtbase, base->reg_val_backup,
		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
		     save);

	/* Save/Restore registers only existing on dma40 v3 and later */
	if (base->gen_dmac.backup)
		dma40_backup(base->virtbase, base->reg_val_backup_v4,
			     base->gen_dmac.backup,
			     base->gen_dmac.backup_size,
			     save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
}
#endif
static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			d40_err(chan2dev(d40c),
				"unable to suspend the chl %d (log: %d) status %x\n",
				d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release completed descriptors */
	while ((d40d = d40_first_done(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	/* Release descriptors in prepare queue */
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
}
static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:

		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			  D40_EVENTLINE_POS(event);

		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				  D40_EVENTLINE_POS(event);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			d40_err(chan2dev(d40c),
				"unable to stop the event_line chl %d (log: %d) status %x\n",
				d40c->phy_chan->num,
				d40c->log_num, status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
	/*
	 * The hardware sometimes doesn't register the enable when src and dst
	 * event lines are active on the same logical channel. Retry to ensure
	 * it does. Usually only one retry is sufficient.
	 */
		{
			int tries = 100;

			while (--tries) {
				writel((D40_ACTIVATE_EVENTLINE <<
					D40_EVENTLINE_POS(event)) |
				       ~D40_EVENTLINE_MASK(event), addr);

				if (readl(addr) & D40_EVENTLINE_MASK(event))
					break;
			}

			if (tries != 99)
				dev_dbg(chan2dev(d40c),
					"[%s] workaround enable S%cLNK (%d tries)\n",
					__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
					100 - tries);

			WARN_ON(!tries);
		}
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;
	}
}
static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);
	}

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
	}
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}
static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				 D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}
static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}
static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
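/*
 * Added note: the residue is returned in bytes, i.e. the element count
 * (ECNT) the hardware has left, scaled by the destination element width.
 * E.g. 16 remaining elements with word-wide (4-byte, data_width == 2)
 * destination elements gives 16 * (1 << 2) = 64 bytes.
 */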
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}
static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	pm_runtime_get_sync(d40c->base->dev);
	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}
static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}
static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			pm_runtime_get_sync(d40c->base->dev);
		}

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}
/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload
		 * it, and only when the list is completed. We need to check
		 * for done because the interrupt will hit for every link, and
		 * not just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL)
			d40c->busy = false;
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);

		d40_desc_remove(d40d);
		d40_desc_done(d40c, d40d);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first entry from the done list */
	d40d = d40_first_done(d40c);
	if (d40d == NULL) {
		/* Check if we have reached here for cyclic job */
		d40d = d40_first_active_get(d40c);
		if (d40d == NULL || !d40d->cyclic)
			goto err;
	}

	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	int i;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;
	u32 regs[base->gen_dmac.il_size];
	struct d40_interrupt_lookup *il = base->gen_dmac.il;
	u32 il_size = base->gen_dmac.il_size;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < il_size; i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * il_size, chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * il_size)
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];

		if (!d40c) {
			/*
			 * No error because this can happen if something else
			 * in the system is using the channel.
			 */
			continue;
		}

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
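/*
 * Added note: the status words are scanned as one flat bitmap, so bit
 * number 'chan' decomposes into row = chan / BITS_PER_LONG (which status
 * register it came from) and idx = chan & (BITS_PER_LONG - 1) (the channel
 * bit inside that register). On a 32-bit build, bit 70 means row 2, idx 6.
 */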
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		chan_err(d40c, "Invalid TX channel address (%d)\n",
			 conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid RX channel address (%d)\n",
			 conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		chan_err(d40c, "Invalid dst\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		chan_err(d40c, "Invalid src\n");
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		chan_err(d40c, "No event line\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		chan_err(d40c, "Invalid event group\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}
static bool d40_alloc_mask_set(struct d40_phy_res *phy,
			       bool is_src, int log_event_line, bool is_log,
			       bool *first_user)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);

	*first_user = ((phy->allocated_src | phy->allocated_dst)
			== D40_ALLOC_FREE);

	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
}
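/*
 * Added note: allocated_src/allocated_dst use D40_ALLOC_FREE (bit 31) for a
 * completely free half channel and D40_ALLOC_PHY (bit 30) when the whole
 * physical channel is taken; otherwise the low bits form a bitmask of the
 * logical event lines in use, e.g. event line 3 in use => bit 3 set.
 */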
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}
static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	int num_phy_chans;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;
	num_phy_chans = d40c->base->num_phy_chans;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			if (d40c->dma_cfg.use_fixed_channel) {
				i = d40c->dma_cfg.phy_channel;
				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
					goto found_phy;
			} else {
				for (i = 0; i < num_phy_chans; i++) {
					if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log,
						       first_phy_user))
						goto found_phy;
				}
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log,
							       first_phy_user))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;

		if (d40c->dma_cfg.use_fixed_channel) {
			i = d40c->dma_cfg.phy_channel;

			if ((i != phy_num) && (i != phy_num + 1)) {
				dev_err(chan2dev(d40c),
					"invalid fixed phy channel %d\n", i);
				return -EINVAL;
			}

			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
					       is_log, first_phy_user))
				goto found_log;

			dev_err(chan2dev(d40c),
				"could not allocate fixed phy channel %d\n", i);
			return -EINVAL;
		}

		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log,
						       first_phy_user))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}
static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	pm_runtime_get_sync(d40c->base->dev);
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "stop failed\n");
		goto out;
	}

	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);

	if (chan_is_logical(d40c))
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
	else
		d40c->base->lookup_phy_chans[phy->num] = NULL;

	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}

	d40c->busy = false;
	d40c->phy_chan = NULL;
	d40c->configured = false;
out:

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	return res;
}
static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		 D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}
static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}
static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}
static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}
static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}
static dma_addr_t
d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
{
	struct stedma40_platform_data *plat = chan->base->plat_data;
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	dma_addr_t addr = 0;

	if (chan->runtime_addr)
		return chan->runtime_addr;

	if (direction == DMA_DEV_TO_MEM)
		addr = plat->dev_rx[cfg->src_dev_type];
	else if (direction == DMA_MEM_TO_DEV)
		addr = plat->dev_tx[cfg->dst_dev_type];

	return addr;
}
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_transfer_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr = 0;
	dma_addr_t dst_dev_addr = 0;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto err;

	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
		desc->cyclic = true;

	if (direction != DMA_TRANS_NONE) {
		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);

		if (direction == DMA_DEV_TO_MEM)
			src_dev_addr = dev_addr;
		else if (direction == DMA_MEM_TO_DEV)
			dst_dev_addr = dev_addr;
	}

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto err;
	}

	/*
	 * add descriptor to the prepare queue in order to be able
	 * to free them later in terminate_all
	 */
	list_add_tail(&desc->node, &chan->prepare_queue);

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;

err:
	if (desc)
		d40_desc_free(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 rtreg;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = 1 << event;
	u32 prioreg;
	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;

	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
	/*
	 * Due to a hardware bug, in some cases a logical channel triggered by
	 * a high priority destination event line can generate extra packet
	 * transactions.
	 *
	 * The workaround is to not set the high priority level for the
	 * destination event lines that trigger logical channels.
	 */
	if (!src && chan_is_logical(d40c))
		highprio = false;

	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}
static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);

	if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
}
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);

	dma_cookie_init(chan);

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			chan_err(d40c, "Failed to configure memcpy channel\n");
			goto fail;
		}
	}

	err = d40_allocate_channel(d40c, &is_free_phy);
	if (err) {
		chan_err(d40c, "Failed to allocate channel\n");
		d40c->configured = false;
		goto fail;
	}

	pm_runtime_get_sync(d40c->base->dev);
	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, chan_is_logical(d40c));

	d40_set_prio_realtime(d40c);

	if (chan_is_logical(d40c)) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
		chan_is_logical(d40c) ? "logical" : "physical",
		d40c->phy_chan->num,
		d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
}
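/*
 * memcpy is funnelled through d40_prep_sg() by wrapping the source and
 * destination buffers in single-entry scatterlists.
 */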
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = size;
	sg_dma_len(&src_sg) = size;

	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}
static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
		   struct scatterlist *dst_sg, unsigned int dst_nents,
		   struct scatterlist *src_sg, unsigned int src_nents,
		   unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}
static struct dma_async_tx_descriptor *
d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction direction,
		  unsigned long dma_flags, void *context)
{
	if (!is_slave_direction(direction))
		return NULL;

	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}
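/*
 * Cyclic transfers are built as a scatterlist with one entry per
 * period plus a terminating entry whose page_link chains back to the
 * start of the list; d40_prep_sg() detects this loop and sets
 * desc->cyclic.
 */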
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		      size_t buf_len, size_t period_len,
		      enum dma_transfer_direction direction, unsigned long flags,
		      void *context)
{
	unsigned int periods = buf_len / period_len;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
	if (!sg)	/* guard the allocation before building the list */
		return NULL;

	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = dma_addr;
		sg_dma_len(&sg[i]) = period_len;
		dma_addr += period_len;
	}

	sg[periods].offset = 0;
	sg_dma_len(&sg[periods]) = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);

	kfree(sg);

	return txd;
}
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	enum dma_status ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		return -EINVAL;
	}

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, stedma40_residue(chan));

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;

	return ret;
}
static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);

	/* Busy means that queued jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}
static void d40_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	int ret;

	spin_lock_irqsave(&d40c->lock, flags);

	pm_runtime_get_sync(d40c->base->dev);
	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (ret)
		chan_err(d40c, "Failed to stop channel\n");

	d40_term_all(d40c);
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	if (d40c->busy) {
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);
	}
	d40c->busy = false;

	spin_unlock_irqrestore(&d40c->lock, flags);
}
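/*
 * Translate a generic dma_slave_config bus width and maxburst into the
 * DMA40 half-channel data width and physical/logical packet size.
 */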
static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
			    struct stedma40_half_channel_info *info,
			    enum dma_slave_buswidth width,
			    u32 maxburst)
{
	enum stedma40_periph_data_width addr_width;
	int psize;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			width);
		return -EINVAL;
	}

	if (chan_is_logical(d40c)) {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}

	info->data_width = addr_width;
	info->psize = psize;
	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	return 0;
}
/* Runtime reconfiguration extension */
static int d40_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	dma_addr_t config_addr;
	u32 src_maxburst, dst_maxburst;
	int ret;

	src_addr_width = config->src_addr_width;
	src_maxburst = config->src_maxburst;
	dst_addr_width = config->dst_addr_width;
	dst_maxburst = config->dst_maxburst;

	if (config->direction == DMA_DEV_TO_MEM) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		/* Configure the memory side */
		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			dst_addr_width = src_addr_width;
		if (dst_maxburst == 0)
			dst_maxburst = src_maxburst;

	} else if (config->direction == DMA_MEM_TO_DEV) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		/* Configure the memory side */
		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			src_addr_width = dst_addr_width;
		if (src_maxburst == 0)
			src_maxburst = dst_maxburst;
	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return -EINVAL;
	}

	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
		dev_err(d40c->base->dev,
			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
			src_maxburst,
			src_addr_width,
			dst_maxburst,
			dst_addr_width);
		return -EINVAL;
	}

	if (src_maxburst > 16) {
		src_maxburst = 16;
		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
	} else if (dst_maxburst > 16) {
		dst_maxburst = 16;
		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
	}

	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
					  src_addr_width,
					  src_maxburst);
	if (ret)
		return ret;

	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
					  dst_addr_width,
					  dst_maxburst);
	if (ret)
		return ret;

	/* Fill in register values */
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg,
			    &d40c->dst_def_cfg, false);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d/%d, "
		"maxburst %d/%d elements, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
		src_addr_width, dst_addr_width,
		src_maxburst, dst_maxburst);

	return 0;
}
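/*
 * Illustrative client-side sketch (not part of this driver): the
 * runtime reconfiguration above is normally reached through the
 * generic dmaengine API, e.g.:
 *
 *	struct dma_slave_config conf = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_addr,	// assumed device FIFO address
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &conf);
 */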
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		d40_terminate_all(chan);
		return 0;
	case DMA_PAUSE:
		return d40_pause(d40c);
	case DMA_RESUME:
		return d40_resume(d40c);
	case DMA_SLAVE_CONFIG:
		return d40_set_runtime_config(chan,
			(struct dma_slave_config *) arg);
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}
/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->done);
		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->pending_queue);
		INIT_LIST_HEAD(&d40c->client);
		INIT_LIST_HEAD(&d40c->prepare_queue);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}
static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
		dev->device_prep_slave_sg = d40_prep_slave_sg;

	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
		dev->device_prep_dma_memcpy = d40_prep_memcpy;

		/*
		 * This controller can only access address at even
		 * 32bit boundaries, i.e. 2^2
		 */
		dev->copy_align = 2;
	}

	if (dma_has_cap(DMA_SG, dev->cap_mask))
		dev->device_prep_dma_sg = d40_prep_memcpy_sg;

	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;

	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
	dev->device_free_chan_resources = d40_free_chan_resources;
	dev->device_issue_pending = d40_issue_pending;
	dev->device_tx_status = d40_tx_status;
	dev->device_control = d40_control;
	dev->dev = base->dev;
}
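/*
 * Register three dma_device instances: one exposing slave/cyclic
 * capable logical channels, one exposing memcpy-only logical channels,
 * and one exposing the reserved physical channels with both
 * capabilities.
 */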
static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

	d40_ops_init(base, &base->dma_slave);

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		d40_err(base->dev, "Failed to register slave channels\n");
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	d40_ops_init(base, &base->dma_memcpy);

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

	d40_ops_init(base, &base->dma_both);
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
/* Suspend resume functionality */
#ifdef CONFIG_PM
static int dma40_pm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;

	if (base->lcpa_regulator)
		ret = regulator_disable(base->lcpa_regulator);
	return ret;
}

static int dma40_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	d40_save_restore_registers(base, true);

	/* Don't disable/enable clocks for v1 due to HW bugs */
	if (base->rev != 1)
		writel_relaxed(base->gcc_pwr_off_mask,
			       base->virtbase + D40_DREG_GCC);

	return 0;
}

static int dma40_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	if (base->initialized)
		d40_save_restore_registers(base, false);

	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
		       base->virtbase + D40_DREG_GCC);
	return 0;
}

static int dma40_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;

	if (base->lcpa_regulator)
		ret = regulator_enable(base->lcpa_regulator);

	return ret;
}

static const struct dev_pm_ops dma40_pm_ops = {
	.suspend		= dma40_pm_suspend,
	.runtime_suspend	= dma40_runtime_suspend,
	.runtime_resume		= dma40_runtime_resume,
	.resume			= dma40_resume,
};
#define DMA40_PM_OPS	(&dma40_pm_ops)
#else
#define DMA40_PM_OPS	NULL
#endif
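/*
 * With runtime PM the DMA register file may lose state while the
 * controller is suspended: dma40_runtime_suspend() saves the backup
 * register lists declared at the top of this file and masks the global
 * clocks, while dma40_runtime_resume() restores the saved registers
 * and re-enables all clocks.
 */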
/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;
	int gcc = D40_DREG_GCC_ENA;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
			base->phy_res[i].reserved = true;
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_SRC);
			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
						       D40_DREG_GCC_DST);
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			base->phy_res[i].reserved = false;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		base->phy_res[chan].reserved = true;
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_SRC);
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
					       D40_DREG_GCC_DST);
		num_phy_chans_avail--;
	}

	/* Mark soft_lli channels */
	for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
		int chan = base->plat_data->soft_lli_chans[i];

		base->phy_res[chan].use_soft_lli = true;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	/*
	 * To keep things simple, enable all clocks initially.
	 * The clocks will get managed later, post channel allocation.
	 * The clocks for the event lines on which reserved channels exist
	 * are not managed here.
	 */
	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
	base->gcc_pwr_off_mask = gcc;

	return num_phy_chans_avail;
}
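/*
 * Probe-time hardware detection: read the AMBA PrimeCell ID from the
 * top of the register window to validate the IP and derive its
 * revision, then size and allocate the driver state accordingly.
 */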
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int clk_ret = -EINVAL;
	int i;
	u32 pid;
	u32 cid;
	u8 rev;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		d40_err(&pdev->dev, "No matching clock found\n");
		goto failure;
	}

	clk_ret = clk_prepare_enable(clk);
	if (clk_ret) {
		d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
		goto failure;
	}

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* This is just a regular AMBA PrimeCell ID actually */
	for (pid = 0, i = 0; i < 4; i++)
		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
			& 255) << (i * 8);
	for (cid = 0, i = 0; i < 4; i++)
		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
			& 255) << (i * 8);

	if (cid != AMBA_CID) {
		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
		goto failure;
	}
	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			AMBA_MANF_BITS(pid),
			AMBA_VENDOR_ST);
		goto failure;
	}
	/*
	 * HW revision:
	 * DB8500ed has revision 0
	 * DB8500v1 has revision 2
	 * DB8500v2 has revision 3
	 * AP9540v1 has revision 4
	 * DB8540v1 has revision 4
	 */
	rev = AMBA_REV_BITS(pid);

	plat_data = pdev->dev.platform_data;

	/* The number of physical channels on this HW */
	if (plat_data->num_of_phy_chans)
		num_phy_chans = plat_data->num_of_phy_chans;
	else
		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
		 rev, res->start, num_phy_chans);

	if (rev < 2) {
		d40_err(&pdev->dev, "hardware revision: %d is not supported",
			rev);
		goto failure;
	}

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		d40_err(&pdev->dev, "Out of memory\n");
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	if (base->plat_data->num_of_phy_chans == 14) {
		base->gen_dmac.backup = d40_backup_regs_v4b;
		base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
		base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
		base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
		base->gen_dmac.il = il_v4b;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
		base->gen_dmac.init_reg = dma_init_reg_v4b;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
	} else {
		if (base->rev >= 3) {
			base->gen_dmac.backup = d40_backup_regs_v4a;
			base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
		}
		base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
		base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
		base->gen_dmac.realtime_en = D40_DREG_RSEG1;
		base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
		base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
		base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
		base->gen_dmac.il = il_v4a;
		base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
		base->gen_dmac.init_reg = dma_init_reg_v4a;
		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
	}

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels is the number of
		 * event lines for all src and dst devices.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
					    sizeof(d40_backup_regs_chan),
					    GFP_KERNEL);
	if (!base->reg_val_backup_chan)
		goto failure;

	base->lcla_pool.alloc_map =
		kzalloc(num_phy_chans * sizeof(struct d40_desc *)
			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!clk_ret)
		clk_disable_unprepare(clk);
	if (!IS_ERR(clk))
		clk_put(clk);
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->reg_val_backup_chan);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
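/*
 * One-time hardware initialisation: program the per-version init
 * register list and put every available physical channel into a known
 * default state (physical mode, interrupt enabled and cleared).
 */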
static void __init d40_hw_init(struct d40_base *base)
{
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;
	struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
	u32 reg_size = base->gen_dmac.init_reg_size;

	for (i = 0; i < reg_size; i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);

	/* These are __initdata and cannot be accessed after init */
	base->gen_dmac.init_reg = NULL;
	base->gen_dmac.init_reg_size = 0;
}
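/*
 * The LCLA area must be 256 KiB aligned (LCLA_ALIGNMENT). Rather than
 * wasting memory on a guaranteed-aligned allocation, keep grabbing
 * page blocks until one happens to be aligned, and only fall back to
 * an over-allocated buffer trimmed with PTR_ALIGN() if that never
 * happens.
 */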
static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
	 * To fulfill this hardware requirement without wasting 256 KiB
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculate how many pages are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts with no success finding the correct
		 * alignment, fall back to allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
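/*
 * Driver entry point: wire up the memory regions ("base", "lcpa" and
 * optionally "lcla_esram"), the interrupt, runtime PM and the
 * dmaengine registration, unwinding everything on failure.
 */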
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);
	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}
	/* If lcla has to be located in ESRAM we don't need to allocate */
	if (base->plat_data->use_esram_lcla) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "lcla_esram");
		if (!res) {
			ret = -ENOENT;
			d40_err(&pdev->dev,
				"No \"lcla_esram\" memory resource\n");
			goto failure;
		}
		base->lcla_pool.base = ioremap(res->start,
					       resource_size(res));
		if (!base->lcla_pool.base) {
			ret = -ENOMEM;
			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
			goto failure;
		}
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	} else {
		ret = d40_lcla_allocate(base);
		if (ret) {
			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
			goto failure;
		}
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	pm_runtime_irq_safe(base->dev);
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_enable(base->dev);
	pm_runtime_resume(base->dev);

	if (base->plat_data->use_esram_lcla) {

		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
		if (IS_ERR(base->lcpa_regulator)) {
			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
			base->lcpa_regulator = NULL;
			goto failure;
		}

		ret = regulator_enable(base->lcpa_regulator);
		if (ret) {
			d40_err(&pdev->dev,
				"Failed to enable lcpa_regulator\n");
			regulator_put(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}
	}

	base->initialized = true;
	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	base->dev->dma_parms = &base->dma_parms;
	err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
	if (err) {
		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
		goto failure;
	}

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
			iounmap(base->lcla_pool.base);
			base->lcla_pool.base = NULL;
		}

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable_unprepare(base->clk);
			clk_put(base->clk);
		}

		if (base->lcpa_regulator) {
			regulator_disable(base->lcpa_regulator);
			regulator_put(base->lcpa_regulator);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
		.pm = DMA40_PM_OPS,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);