/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (MM2S)
 * and write (S2MM) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"
/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0
/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)
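/*
 * Illustrative sketch (not part of the driver): the delay timeout and frame
 * count occupy read-modify-write fields of DMACR, so a caller-side update
 * using the register helpers defined later in this file could look like:
 *
 *	u32 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
 *
 *	dmacr &= ~XILINX_DMA_DMASR_FRAME_COUNT_MASK;
 *	dmacr |= coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
 *	dmacr &= ~XILINX_DMA_DMASR_DELAY_MASK;
 *	dmacr |= delay << XILINX_DMA_DMACR_DELAY_SHIFT;
 *	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
 *
 * 'coalesc' and 'delay' are placeholder variables for this sketch; see
 * xilinx_vdma_channel_set_config() for the driver's actual handling.
 */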
#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))
/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000
/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x - 1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x - 1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);
/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);
/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @tdest: TDEST value for mcdma
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
};
struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
};
/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)
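/*
 * Illustrative use of the poll macro above (mirrors xilinx_dma_reset()
 * further down): spin until the reset bit self-clears or the loop count
 * expires, with 'val' receiving the last DMACR value read:
 *
 *	u32 val;
 *	int err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, val,
 *					  !(val & XILINX_DMA_DMACR_RESET), 0,
 *					  XILINX_DMA_LOOP_COUNT);
 */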
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}
/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver may need to write to a register offset that is not
 * 64-bit aligned (e.g. 0x5c), the value is written as two separate 32-bit
 * writes instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}
static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}
static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}
/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}
/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}
/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}
/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
		xilinx_dma_free_tx_segment(chan, chan->seg_v);
	}
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}
/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}
/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * for meeting Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
				   chan->dev,
				   sizeof(struct xilinx_axidma_tx_segment),
				   __alignof__(struct xilinx_axidma_tx_segment),
				   0);
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
				   chan->dev,
				   sizeof(struct xilinx_cdma_tx_segment),
				   __alignof__(struct xilinx_cdma_tx_segment),
				   0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				     chan->dev,
				     sizeof(struct xilinx_vdma_tx_segment),
				     __alignof__(struct xilinx_vdma_tx_segment),
				     0);
	}

	if (!chan->desc_pool) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/*
		 * For AXI DMA case after submitting a pending_list, keep
		 * an extra segment allocated so that the "next descriptor"
		 * pointer on the tail descriptor always points to a
		 * valid descriptor, even when paused after reaching taildesc.
		 * This way, it is possible to issue additional
		 * transfers without halting and restarting the channel.
		 */
		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);

		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain
		 * so allocating a desc segment during channel allocation for
		 * programming tail descriptor.
		 */
		chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA, resetting one channel will reset the
		 * other channel as well, so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}
/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);

		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
		if (chan->has_sg) {
			list_for_each_entry(segment, &desc->segments, node) {
				hw = &segment->hw;
				residue += (hw->control - hw->status) &
					   XILINX_DMA_MAX_TRANS_LEN;
			}
		}
		spin_unlock_irqrestore(&chan->lock, flags);

		chan->residue = residue;
		dma_set_residue(txstate, chan->residue);
	}

	return ret;
}
/**
 * xilinx_dma_is_running - Check if DMA channel is running
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if running, '0' if not.
 */
static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
{
	return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		 XILINX_DMA_DMASR_HALTED) &&
		(dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
		 XILINX_DMA_DMACR_RUNSTOP);
}
/**
 * xilinx_dma_is_idle - Check if DMA channel is idle
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if idle, '0' if not.
 */
static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
{
	return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		XILINX_DMA_DMASR_IDLE;
}
/**
 * xilinx_dma_halt - Halt DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      (val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}
}
/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}
}
/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
	u32 reg;
	struct xilinx_vdma_tx_segment *tail_segment;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);

	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_dma_is_running(chan) &&
	    !xilinx_dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg)
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       desc->async_tx.phys);

	/* Configure the hardware using info in the config structure */
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/* Configure channel to allow number frame buffers */
	dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
		       chan->desc_pendingcount);

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	if (config->park && (config->park_frm >= 0) &&
	    (config->park_frm < chan->num_frms)) {
		if (chan->direction == DMA_MEM_TO_DEV)
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
				  XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
		else
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
				  XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
	}

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
			       tail_segment->phys);
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		if (chan->desc_submitcount < chan->num_frms)
			i = chan->desc_submitcount;

		list_for_each_entry(segment, &desc->segments, node) {
			if (chan->ext_addr)
				vdma_desc_write_64(chan,
					XILINX_VDMA_REG_START_ADDRESS_64(i++),
					segment->hw.buf_addr,
					segment->hw.buf_addr_msb);
			else
				vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

			last = segment;
		}

		if (!last)
			return;

		/* HW expects these parameters to be same for one transaction */
		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
	}

	if (!chan->has_sg) {
		list_del(&desc->node);
		list_add_tail(&desc->node, &chan->active_list);
		chan->desc_submitcount++;
		chan->desc_pendingcount--;
		if (chan->desc_submitcount == chan->num_frms)
			chan->desc_submitcount = 0;
	} else {
		list_splice_tail_init(&chan->pending_list, &chan->active_list);
		chan->desc_pendingcount = 0;
	}
}
/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
				XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

	if (chan->has_sg) {
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;

		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}
/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
	u32 reg;

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_dma_is_running(chan) &&
	    !xilinx_dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

	if (chan->has_sg && !chan->xdev->mcdma) {
		old_head = list_first_entry(&head_desc->segments,
					    struct xilinx_axidma_tx_segment,
					    node);
		new_head = chan->seg_v;
		/* Copy Buffer Descriptor fields. */
		new_head->hw = old_head->hw;

		/* Swap and save new reserve */
		list_replace_init(&old_head->node, &new_head->node);
		chan->seg_v = old_head;

		tail_segment->hw.next_desc = chan->seg_v->phys;
		head_desc->async_tx.phys = new_head->phys;
	}

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
				XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg && !chan->xdev->mcdma)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

	if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
				       head_desc->async_tx.phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
					       head_desc->async_tx.phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_CDESC(chan->tdest),
					head_desc->async_tx.phys);
			}
		}
	}

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg && !chan->xdev->mcdma) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       tail_segment->phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
					       tail_segment->phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_TDESC(chan->tdest),
					tail_segment->phys);
			}
		}
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}
/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: xilinx DMA channel
 *
 * CONTEXT: hardirq
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		list_del(&desc->node);
		if (!desc->cyclic)
			dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}
/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;

	return err;
}
/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	/* Reset VDMA */
	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}
/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR
		 * register, so make sure not to write other error bits to 1.
		 */
		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
			chan->err = true;
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}
/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *tail_segment;
	struct xilinx_dma_tx_descriptor *tail_desc;
	struct xilinx_axidma_tx_segment *axidma_tail_segment;
	struct xilinx_cdma_tx_segment *cdma_tail_segment;

	if (list_empty(&chan->pending_list))
		goto append;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_vdma_tx_segment,
					       node);
		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		cdma_tail_segment = list_last_entry(&tail_desc->segments,
						struct xilinx_cdma_tx_segment,
						node);
		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else {
		axidma_tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_axidma_tx_segment,
					       node);
		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	}

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
append:
	list_add_tail(&desc->node, &chan->pending_list);
	chan->desc_pendingcount++;

	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
		dev_dbg(chan->dev, "desc pendingcount is too high\n");
		chan->desc_pendingcount = chan->num_frms;
	}
}
/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->cyclic) {
		xilinx_dma_free_tx_descriptor(chan, desc);
		return -EBUSY;
	}

	if (chan->err) {
		/*
		 * If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		err = xilinx_dma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	if (desc->cyclic)
		chan->cyclic = true;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment, *prev = NULL;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
			XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
			XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

	if (xt->dir != DMA_MEM_TO_DEV) {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->dst_start);
			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
		} else {
			hw->buf_addr = xt->dst_start;
		}
	} else {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->src_start);
			hw->buf_addr_msb = upper_32_bits(xt->src_start);
		} else {
			hw->buf_addr = xt->src_start;
		}
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	prev = segment;

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment, *prev;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Fill the previous next descriptor with current */
	prev = list_last_entry(&desc->segments,
			       struct xilinx_cdma_tx_segment, node);
	prev->hw.next_desc = segment->phys;

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	prev = segment;

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_cdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;
	prev->hw.next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);

			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;
	prev->hw.next_desc = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA
 *	transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, period_len - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
			    struct dma_interleaved_template *xt,
			    unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = xt->dir;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Get a free segment */
	segment = xilinx_axidma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;

	/* Fill in the descriptor */
	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
			    XILINX_DMA_BD_VSIZE_MASK;
	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
			    XILINX_DMA_BD_STRIDE_MASK;
	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

	/*
	 * Insert the segment into the descriptor segments
	 * list.
	 */
	list_add_tail(&segment->node, &desc->segments);

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (xt->dir == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;

	if (chan->cyclic)
		xilinx_dma_chan_reset(chan);

	/* Halt the DMA engine */
	xilinx_dma_halt(chan);

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	return 0;
}
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}
static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
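
/*
 * Note (editorial): in axidma_clk_init() above, the MM2S, S2MM and SG
 * clocks are optional at synthesis time, so a failed devm_clk_get() simply
 * leaves the pointer NULL. The common clock framework treats a NULL clk as
 * a dummy, so clk_prepare_enable(NULL) returns 0 and
 * clk_disable_unprepare(NULL) is a no-op; no extra guards are needed. The
 * same pattern is used by the other clk_init helpers below.
 */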
static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets the channel features from the device tree entry and
 * initializes special channel handling routines.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA channel index
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);
	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}
	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		chan->start_transfer = xilinx_dma_start_transfer;
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		chan->start_transfer = xilinx_cdma_start_transfer;
	else
		chan->start_transfer = xilinx_vdma_start_transfer;

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);
	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}
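
/*
 * Note (editorial): when "dma-channels" is absent the per-node count
 * defaults to 1, and the warning above is only emitted for multi-channel
 * (mcdma) configurations where the property is expected. An illustrative
 * child-node property, assuming the usual binding syntax:
 *
 *	dma-channels = <1>;
 */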
/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
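
/*
 * Illustrative sketch (editorial, not part of this driver): with a
 * consumer node that references this controller, e.g.
 * dmas = <&axi_dma 0>; dma-names = "rx";, a client driver would obtain
 * the channel translated above with:
 *
 *	chan = dma_request_chan(&client_pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 * "client_pdev" and the "rx" name are hypothetical.
 */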
static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;
	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;
	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);
	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}
	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					  xilinx_dma_prep_interleaved;
		/* Residue calculation is supported only by AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);
	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);
	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}
/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}
static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");