Commit | Line | Data |
---|---|---|
e7c0fe2a AG |
1 | /* |
2 | * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License version 2 and | |
6 | * only version 2 as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
12 | * | |
13 | */ | |
14 | /* | |
15 | * QCOM BAM DMA engine driver | |
16 | * | |
17 | * QCOM BAM DMA blocks are distributed amongst a number of the on-chip | |
18 | * peripherals on the MSM 8x74. The configuration of the channels are dependent | |
19 | * on the way they are hard wired to that specific peripheral. The peripheral | |
20 | * device tree entries specify the configuration of each channel. | |
21 | * | |
22 | * The DMA controller requires the use of external memory for storage of the | |
23 | * hardware descriptors for each channel. The descriptor FIFO is accessed as a | |
24 | * circular buffer and operations are managed according to the offset within the | |
25 | * FIFO. After pipe/channel reset, all of the pipe registers and internal state | |
26 | * are back to defaults. | |
27 | * | |
28 | * During DMA operations, we write descriptors to the FIFO, being careful to | |
29 | * handle wrapping and then write the last FIFO offset to that channel's | |
30 | * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register | |
31 | * indicates the current FIFO offset that is being processed, so there is some | |
32 | * indication of where the hardware is currently working. | |
33 | */ | |
34 | ||
35 | #include <linux/kernel.h> | |
36 | #include <linux/io.h> | |
37 | #include <linux/init.h> | |
38 | #include <linux/slab.h> | |
39 | #include <linux/module.h> | |
40 | #include <linux/interrupt.h> | |
41 | #include <linux/dma-mapping.h> | |
42 | #include <linux/scatterlist.h> | |
43 | #include <linux/device.h> | |
44 | #include <linux/platform_device.h> | |
45 | #include <linux/of.h> | |
46 | #include <linux/of_address.h> | |
47 | #include <linux/of_irq.h> | |
48 | #include <linux/of_dma.h> | |
49 | #include <linux/clk.h> | |
50 | #include <linux/dmaengine.h> | |
51 | ||
52 | #include "dmaengine.h" | |
53 | #include "virt-dma.h" | |
54 | ||
/*
 * bam_desc_hw - hardware descriptor format written to the descriptor FIFO.
 * One of these per chunk of a transfer; flags carries DESC_FLAG_* bits.
 */
struct bam_desc_hw {
	u32 addr;		/* Buffer physical address */
	u16 size;		/* Buffer size in bytes */
	u16 flags;		/* DESC_FLAG_INT/EOT/EOB/NWD bits */
};
60 | ||
/* per-descriptor flag bits for bam_desc_hw.flags */
#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
#define DESC_FLAG_NWD BIT(12)
e7c0fe2a AG |
65 | |
66 | struct bam_async_desc { | |
67 | struct virt_dma_desc vd; | |
68 | ||
69 | u32 num_desc; | |
70 | u32 xfer_len; | |
89751d0a AG |
71 | |
72 | /* transaction flags, EOT|EOB|NWD */ | |
73 | u16 flags; | |
74 | ||
e7c0fe2a AG |
75 | struct bam_desc_hw *curr_desc; |
76 | ||
77 | enum dma_transfer_direction dir; | |
78 | size_t length; | |
79 | struct bam_desc_hw desc[0]; | |
80 | }; | |
81 | ||
fb93f520 AT |
/*
 * Logical identifiers for BAM registers; each indexes a reg_offset_data
 * entry in the per-version layout tables below (BAM_P_* are per-pipe).
 */
enum bam_reg {
	BAM_CTRL,
	BAM_REVISION,
	BAM_NUM_PIPES,
	BAM_DESC_CNT_TRSHLD,
	BAM_IRQ_SRCS,
	BAM_IRQ_SRCS_MSK,
	BAM_IRQ_SRCS_UNMASKED,
	BAM_IRQ_STTS,
	BAM_IRQ_CLR,
	BAM_IRQ_EN,
	BAM_CNFG_BITS,
	BAM_IRQ_SRCS_EE,
	BAM_IRQ_SRCS_MSK_EE,
	BAM_P_CTRL,
	BAM_P_RST,
	BAM_P_HALT,
	BAM_P_IRQ_STTS,
	BAM_P_IRQ_CLR,
	BAM_P_IRQ_EN,
	BAM_P_EVNT_DEST_ADDR,
	BAM_P_EVNT_REG,
	BAM_P_SW_OFSTS,
	BAM_P_DATA_FIFO_ADDR,
	BAM_P_DESC_FIFO_ADDR,
	BAM_P_EVNT_GEN_TRSHLD,
	BAM_P_FIFO_SIZES,
};
110 | ||
/*
 * reg_offset_data - one register's address recipe:
 * base_offset + pipe_mult*pipe + evnt_mult*pipe + ee_mult*ee
 * (only one multiplier is non-zero for any given register)
 */
struct reg_offset_data {
	u32 base_offset;
	unsigned int pipe_mult, evnt_mult, ee_mult;
};
115 | ||
f43669de AT |
/* register layout for BAM hardware v1.3: top-level regs at 0x0F80+, pipes strided by 0x80 */
static const struct reg_offset_data bam_v1_3_reg_info[] = {
	[BAM_CTRL]		= { 0x0F80, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x0F84, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x0FBC, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x0F88, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x0F8C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x0F90, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0FB0, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x0F94, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x0F98, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x0F9C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x0FFC, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x1800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x1804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL]		= { 0x0000, 0x80, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x0004, 0x80, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x0008, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x0010, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x0014, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x0018, 0x80, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x102C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x1018, 0x00, 0x40, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x1000, 0x00, 0x40, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x1024, 0x00, 0x40, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x101C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1028, 0x00, 0x40, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x1020, 0x00, 0x40, 0x00 },
};
144 | ||
145 | static const struct reg_offset_data bam_v1_4_reg_info[] = { | |
fb93f520 AT |
146 | [BAM_CTRL] = { 0x0000, 0x00, 0x00, 0x00 }, |
147 | [BAM_REVISION] = { 0x0004, 0x00, 0x00, 0x00 }, | |
148 | [BAM_NUM_PIPES] = { 0x003C, 0x00, 0x00, 0x00 }, | |
149 | [BAM_DESC_CNT_TRSHLD] = { 0x0008, 0x00, 0x00, 0x00 }, | |
150 | [BAM_IRQ_SRCS] = { 0x000C, 0x00, 0x00, 0x00 }, | |
151 | [BAM_IRQ_SRCS_MSK] = { 0x0010, 0x00, 0x00, 0x00 }, | |
152 | [BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 }, | |
153 | [BAM_IRQ_STTS] = { 0x0014, 0x00, 0x00, 0x00 }, | |
154 | [BAM_IRQ_CLR] = { 0x0018, 0x00, 0x00, 0x00 }, | |
155 | [BAM_IRQ_EN] = { 0x001C, 0x00, 0x00, 0x00 }, | |
156 | [BAM_CNFG_BITS] = { 0x007C, 0x00, 0x00, 0x00 }, | |
157 | [BAM_IRQ_SRCS_EE] = { 0x0800, 0x00, 0x00, 0x80 }, | |
158 | [BAM_IRQ_SRCS_MSK_EE] = { 0x0804, 0x00, 0x00, 0x80 }, | |
159 | [BAM_P_CTRL] = { 0x1000, 0x1000, 0x00, 0x00 }, | |
160 | [BAM_P_RST] = { 0x1004, 0x1000, 0x00, 0x00 }, | |
161 | [BAM_P_HALT] = { 0x1008, 0x1000, 0x00, 0x00 }, | |
162 | [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, | |
163 | [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, | |
164 | [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, | |
165 | [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 }, | |
166 | [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 }, | |
167 | [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 }, | |
168 | [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, | |
169 | [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, | |
170 | [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, | |
171 | [BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 }, | |
172 | }; | |
e7c0fe2a AG |
173 | |
/* BAM CTRL */
#define BAM_SW_RST			BIT(0)
#define BAM_EN				BIT(1)
#define BAM_EN_ACCUM			BIT(4)
#define BAM_TESTBUS_SEL_SHIFT		5
#define BAM_TESTBUS_SEL_MASK		0x3F
#define BAM_DESC_CACHE_SEL_SHIFT	13
#define BAM_DESC_CACHE_SEL_MASK		0x3
#define BAM_CACHED_DESC_STORE		BIT(15)
#define IBC_DISABLE			BIT(16)

/* BAM REVISION */
#define REVISION_SHIFT			0
#define REVISION_MASK			0xFF
#define NUM_EES_SHIFT			8
#define NUM_EES_MASK			0xF
#define CE_BUFFER_SIZE			BIT(13)
#define AXI_ACTIVE			BIT(14)
#define USE_VMIDMT			BIT(15)
#define SECURED				BIT(16)
#define BAM_HAS_NO_BYPASS		BIT(17)
#define HIGH_FREQUENCY_BAM		BIT(18)
#define INACTIV_TMRS_EXST		BIT(19)
#define NUM_INACTIV_TMRS		BIT(20)
#define DESC_CACHE_DEPTH_SHIFT		21
#define DESC_CACHE_DEPTH_1		(0 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_2		(1 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_3		(2 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_4		(3 << DESC_CACHE_DEPTH_SHIFT)
#define CMD_DESC_EN			BIT(23)
#define INACTIV_TMR_BASE_SHIFT		24
#define INACTIV_TMR_BASE_MASK		0xFF

/* BAM NUM PIPES */
#define BAM_NUM_PIPES_SHIFT		0
#define BAM_NUM_PIPES_MASK		0xFF
#define PERIPH_NON_PIPE_GRP_SHIFT	16
/* NOTE(review): "PIP" (not "PIPE") spelling kept as-is; renaming could break external users */
#define PERIPH_NON_PIP_GRP_MASK		0xFF
#define BAM_NON_PIPE_GRP_SHIFT		24
#define BAM_NON_PIPE_GRP_MASK		0xFF

/* BAM CNFG BITS */
#define BAM_PIPE_CNFG			BIT(2)
#define BAM_FULL_PIPE			BIT(11)
#define BAM_NO_EXT_P_RST		BIT(12)
#define BAM_IBC_DISABLE			BIT(13)
#define BAM_SB_CLK_REQ			BIT(14)
#define BAM_PSM_CSW_REQ			BIT(15)
#define BAM_PSM_P_RES			BIT(16)
#define BAM_AU_P_RES			BIT(17)
#define BAM_SI_P_RES			BIT(18)
#define BAM_WB_P_RES			BIT(19)
#define BAM_WB_BLK_CSW			BIT(20)
#define BAM_WB_CSW_ACK_IDL		BIT(21)
#define BAM_WB_RETR_SVPNT		BIT(22)
#define BAM_WB_DSC_AVL_P_RST		BIT(23)
#define BAM_REG_P_EN			BIT(24)
#define BAM_PSM_P_HD_DATA		BIT(25)
#define BAM_AU_ACCUMED			BIT(26)
#define BAM_CMD_ENABLE			BIT(27)

/* default CNFG_BITS value written at controller init (everything but BAM_FULL_PIPE) */
#define BAM_CNFG_BITS_DEFAULT	(BAM_PIPE_CNFG |	\
				 BAM_NO_EXT_P_RST |	\
				 BAM_IBC_DISABLE |	\
				 BAM_SB_CLK_REQ |	\
				 BAM_PSM_CSW_REQ |	\
				 BAM_PSM_P_RES |	\
				 BAM_AU_P_RES |		\
				 BAM_SI_P_RES |		\
				 BAM_WB_P_RES |		\
				 BAM_WB_BLK_CSW |	\
				 BAM_WB_CSW_ACK_IDL |	\
				 BAM_WB_RETR_SVPNT |	\
				 BAM_WB_DSC_AVL_P_RST |	\
				 BAM_REG_P_EN |		\
				 BAM_PSM_P_HD_DATA |	\
				 BAM_AU_ACCUMED |	\
				 BAM_CMD_ENABLE)

/* PIPE CTRL */
#define P_EN				BIT(1)
#define P_DIRECTION			BIT(3)
#define P_SYS_STRM			BIT(4)
#define P_SYS_MODE			BIT(5)
#define P_AUTO_EOB			BIT(6)
#define P_AUTO_EOB_SEL_SHIFT		7
#define P_AUTO_EOB_SEL_512		(0 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_256		(1 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_128		(2 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_64		(3 << P_AUTO_EOB_SEL_SHIFT)
#define P_PREFETCH_LIMIT_SHIFT		9
#define P_PREFETCH_LIMIT_32		(0 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_16		(1 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_4		(2 << P_PREFETCH_LIMIT_SHIFT)
#define P_WRITE_NWD			BIT(11)
#define P_LOCK_GROUP_SHIFT		16
#define P_LOCK_GROUP_MASK		0x1F

/* BAM_DESC_CNT_TRSHLD */
#define CNT_TRSHLD			0xffff
#define DEFAULT_CNT_THRSHLD		0x4

/* BAM_IRQ_SRCS */
#define BAM_IRQ				BIT(31)
#define P_IRQ				0x7fffffff

/* BAM_IRQ_SRCS_MSK */
#define BAM_IRQ_MSK			BAM_IRQ
#define P_IRQ_MSK			P_IRQ

/* BAM_IRQ_STTS */
#define BAM_TIMER_IRQ			BIT(4)
#define BAM_EMPTY_IRQ			BIT(3)
#define BAM_ERROR_IRQ			BIT(2)
#define BAM_HRESP_ERR_IRQ		BIT(1)

/* BAM_IRQ_CLR */
#define BAM_TIMER_CLR			BIT(4)
#define BAM_EMPTY_CLR			BIT(3)
#define BAM_ERROR_CLR			BIT(2)
#define BAM_HRESP_ERR_CLR		BIT(1)

/* BAM_IRQ_EN */
#define BAM_TIMER_EN			BIT(4)
#define BAM_EMPTY_EN			BIT(3)
#define BAM_ERROR_EN			BIT(2)
#define BAM_HRESP_ERR_EN		BIT(1)

/* BAM_P_IRQ_EN */
#define P_PRCSD_DESC_EN			BIT(0)
#define P_TIMER_EN			BIT(1)
#define P_WAKE_EN			BIT(2)
#define P_OUT_OF_DESC_EN		BIT(3)
#define P_ERR_EN			BIT(4)
#define P_TRNSFR_END_EN			BIT(5)
#define P_DEFAULT_IRQS_EN	(P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)

/* BAM_P_SW_OFSTS */
#define P_SW_OFSTS_MASK			0xffff

#define BAM_DESC_FIFO_SIZE	SZ_32K
/* one descriptor slot is sacrificed so FIFO full/empty can be distinguished */
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_MAX_DATA_SIZE	(SZ_32K - 8)
317 | ||
/*
 * bam_chan - per-pipe (channel) driver state, embedded in a virt-dma channel
 */
struct bam_chan {
	struct virt_dma_chan vc;

	struct bam_device *bdev;	/* owning controller */

	/* configuration from device tree */
	u32 id;				/* pipe number within the BAM */

	struct bam_async_desc *curr_txd;	/* current running dma */

	/* runtime configuration */
	struct dma_slave_config slave;

	/* fifo storage */
	struct bam_desc_hw *fifo_virt;	/* writecombine CPU mapping */
	dma_addr_t fifo_phys;		/* bus address programmed into hw */

	/* fifo markers */
	unsigned short head;		/* start of active descriptor entries */
	unsigned short tail;		/* end of active descriptor entries */

	unsigned int initialized;	/* is the channel hw initialized? */
	unsigned int paused;		/* is the channel paused? */
	unsigned int reconfigure;	/* new slave config? */

	struct list_head node;
};
345 | ||
/* convert a generic dma_chan (embedded via vc.chan) back to its bam_chan */
static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
{
	return container_of(common, struct bam_chan, vc.chan);
}
350 | ||
/*
 * bam_device - per-controller driver state
 */
struct bam_device {
	void __iomem *regs;		/* mapped register space */
	struct device *dev;
	struct dma_device common;	/* dmaengine registration */
	struct device_dma_parameters dma_parms;
	struct bam_chan *channels;	/* array of num_channels entries */
	u32 num_channels;

	/* execution environment ID, from DT */
	u32 ee;

	/* hardware-revision specific register layout (v1.3 or v1.4 table) */
	const struct reg_offset_data *layout;

	struct clk *bamclk;
	int irq;

	/* dma start transaction tasklet */
	struct tasklet_struct task;
};
370 | ||
fb93f520 AT |
371 | /** |
372 | * bam_addr - returns BAM register address | |
373 | * @bdev: bam device | |
374 | * @pipe: pipe instance (ignored when register doesn't have multiple instances) | |
375 | * @reg: register enum | |
376 | */ | |
377 | static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe, | |
378 | enum bam_reg reg) | |
379 | { | |
f43669de | 380 | const struct reg_offset_data r = bdev->layout[reg]; |
fb93f520 AT |
381 | |
382 | return bdev->regs + r.base_offset + | |
383 | r.pipe_mult * pipe + | |
384 | r.evnt_mult * pipe + | |
385 | r.ee_mult * bdev->ee; | |
386 | } | |
387 | ||
e7c0fe2a AG |
/**
 * bam_reset_channel - Reset individual BAM DMA channel
 * @bchan: bam channel
 *
 * This function resets a specific BAM channel by pulsing its P_RST register.
 * Caller must hold bchan->vc.lock.
 */
static void bam_reset_channel(struct bam_chan *bchan)
{
	struct bam_device *bdev = bchan->bdev;

	lockdep_assert_held(&bchan->vc.lock);

	/* reset channel: assert then deassert the per-pipe reset bit */
	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));

	/* don't allow cpu to reorder BAM register accesses done after this */
	wmb();

	/* make sure hw is initialized when channel is used the first time */
	bchan->initialized = 0;
}
410 | ||
/**
 * bam_chan_init_hw - Initialize channel hardware
 * @bchan: bam channel
 * @dir: DMA transfer direction, fixed for the lifetime of this init
 *
 * This function resets and initializes the BAM channel: programs the
 * descriptor FIFO address/size, enables per-pipe interrupts, unmasks the
 * pipe in this EE's interrupt source mask, and enables the pipe.
 * The write ordering below is significant (hence the explicit wmb()).
 */
static void bam_chan_init_hw(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 val;

	/* Reset the channel to clear internal state of the FIFO */
	bam_reset_channel(bchan);

	/*
	 * write out 8 byte aligned address. We have enough space for this
	 * because we allocated 1 more descriptor (8 bytes) than we can use
	 */
	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
	writel_relaxed(BAM_DESC_FIFO_SIZE,
			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));

	/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
	writel_relaxed(P_DEFAULT_IRQS_EN,
			bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

	/* unmask the specific pipe and EE combo */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val |= BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	/* don't allow cpu to reorder the channel enable done below */
	wmb();

	/* set fixed direction and mode, then enable channel */
	val = P_EN | P_SYS_MODE;
	if (dir == DMA_DEV_TO_MEM)
		val |= P_DIRECTION;

	writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));

	bchan->initialized = 1;

	/* init FIFO pointers */
	bchan->head = 0;
	bchan->tail = 0;
}
460 | ||
461 | /** | |
462 | * bam_alloc_chan - Allocate channel resources for DMA channel. | |
463 | * @chan: specified channel | |
464 | * | |
465 | * This function allocates the FIFO descriptor memory | |
466 | */ | |
467 | static int bam_alloc_chan(struct dma_chan *chan) | |
468 | { | |
469 | struct bam_chan *bchan = to_bam_chan(chan); | |
470 | struct bam_device *bdev = bchan->bdev; | |
471 | ||
472 | if (bchan->fifo_virt) | |
473 | return 0; | |
474 | ||
475 | /* allocate FIFO descriptor space, but only if necessary */ | |
476 | bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, | |
477 | &bchan->fifo_phys, GFP_KERNEL); | |
478 | ||
479 | if (!bchan->fifo_virt) { | |
480 | dev_err(bdev->dev, "Failed to allocate desc fifo\n"); | |
481 | return -ENOMEM; | |
482 | } | |
483 | ||
484 | return 0; | |
485 | } | |
486 | ||
/**
 * bam_free_chan - Frees dma resources associated with specific channel
 * @chan: specified channel
 *
 * Free the allocated fifo descriptor memory and channel resources.
 * Refuses to free while a transaction is still in flight (curr_txd set).
 * Ordering: reset the channel before releasing the FIFO memory the
 * hardware points at, then mask and disable the pipe's interrupts.
 */
static void bam_free_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	u32 val;
	unsigned long flags;

	vchan_free_chan_resources(to_virt_chan(chan));

	if (bchan->curr_txd) {
		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
		return;
	}

	spin_lock_irqsave(&bchan->vc.lock, flags);
	bam_reset_channel(bchan);
	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
				bchan->fifo_phys);
	bchan->fifo_virt = NULL;

	/* mask irq for pipe/channel */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val &= ~BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	/* disable irq */
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
}
524 | ||
525 | /** | |
526 | * bam_slave_config - set slave configuration for channel | |
527 | * @chan: dma channel | |
528 | * @cfg: slave configuration | |
529 | * | |
530 | * Sets slave configuration for channel | |
531 | * | |
532 | */ | |
62ec8eb5 MR |
533 | static int bam_slave_config(struct dma_chan *chan, |
534 | struct dma_slave_config *cfg) | |
e7c0fe2a | 535 | { |
62ec8eb5 MR |
536 | struct bam_chan *bchan = to_bam_chan(chan); |
537 | unsigned long flag; | |
538 | ||
539 | spin_lock_irqsave(&bchan->vc.lock, flag); | |
e7c0fe2a AG |
540 | memcpy(&bchan->slave, cfg, sizeof(*cfg)); |
541 | bchan->reconfigure = 1; | |
62ec8eb5 MR |
542 | spin_unlock_irqrestore(&bchan->vc.lock, flag); |
543 | ||
544 | return 0; | |
e7c0fe2a AG |
545 | } |
546 | ||
547 | /** | |
548 | * bam_prep_slave_sg - Prep slave sg transaction | |
549 | * | |
550 | * @chan: dma channel | |
551 | * @sgl: scatter gather list | |
552 | * @sg_len: length of sg | |
553 | * @direction: DMA transfer direction | |
554 | * @flags: DMA flags | |
555 | * @context: transfer context (unused) | |
556 | */ | |
557 | static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | |
558 | struct scatterlist *sgl, unsigned int sg_len, | |
559 | enum dma_transfer_direction direction, unsigned long flags, | |
560 | void *context) | |
561 | { | |
562 | struct bam_chan *bchan = to_bam_chan(chan); | |
563 | struct bam_device *bdev = bchan->bdev; | |
564 | struct bam_async_desc *async_desc; | |
565 | struct scatterlist *sg; | |
566 | u32 i; | |
567 | struct bam_desc_hw *desc; | |
568 | unsigned int num_alloc = 0; | |
569 | ||
570 | ||
571 | if (!is_slave_direction(direction)) { | |
572 | dev_err(bdev->dev, "invalid dma direction\n"); | |
573 | return NULL; | |
574 | } | |
575 | ||
576 | /* calculate number of required entries */ | |
577 | for_each_sg(sgl, sg, sg_len, i) | |
578 | num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE); | |
579 | ||
580 | /* allocate enough room to accomodate the number of entries */ | |
581 | async_desc = kzalloc(sizeof(*async_desc) + | |
582 | (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT); | |
583 | ||
584 | if (!async_desc) | |
585 | goto err_out; | |
586 | ||
89751d0a AG |
587 | if (flags & DMA_PREP_FENCE) |
588 | async_desc->flags |= DESC_FLAG_NWD; | |
589 | ||
590 | if (flags & DMA_PREP_INTERRUPT) | |
591 | async_desc->flags |= DESC_FLAG_EOT; | |
592 | else | |
593 | async_desc->flags |= DESC_FLAG_INT; | |
594 | ||
e7c0fe2a AG |
595 | async_desc->num_desc = num_alloc; |
596 | async_desc->curr_desc = async_desc->desc; | |
597 | async_desc->dir = direction; | |
598 | ||
599 | /* fill in temporary descriptors */ | |
600 | desc = async_desc->desc; | |
601 | for_each_sg(sgl, sg, sg_len, i) { | |
602 | unsigned int remainder = sg_dma_len(sg); | |
603 | unsigned int curr_offset = 0; | |
604 | ||
605 | do { | |
606 | desc->addr = sg_dma_address(sg) + curr_offset; | |
607 | ||
608 | if (remainder > BAM_MAX_DATA_SIZE) { | |
609 | desc->size = BAM_MAX_DATA_SIZE; | |
610 | remainder -= BAM_MAX_DATA_SIZE; | |
611 | curr_offset += BAM_MAX_DATA_SIZE; | |
612 | } else { | |
613 | desc->size = remainder; | |
614 | remainder = 0; | |
615 | } | |
616 | ||
617 | async_desc->length += desc->size; | |
618 | desc++; | |
619 | } while (remainder > 0); | |
620 | } | |
621 | ||
622 | return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags); | |
623 | ||
624 | err_out: | |
625 | kfree(async_desc); | |
626 | return NULL; | |
627 | } | |
628 | ||
629 | /** | |
630 | * bam_dma_terminate_all - terminate all transactions on a channel | |
631 | * @bchan: bam dma channel | |
632 | * | |
633 | * Dequeues and frees all transactions | |
634 | * No callbacks are done | |
635 | * | |
636 | */ | |
62ec8eb5 | 637 | static int bam_dma_terminate_all(struct dma_chan *chan) |
e7c0fe2a | 638 | { |
62ec8eb5 | 639 | struct bam_chan *bchan = to_bam_chan(chan); |
e7c0fe2a AG |
640 | unsigned long flag; |
641 | LIST_HEAD(head); | |
642 | ||
643 | /* remove all transactions, including active transaction */ | |
644 | spin_lock_irqsave(&bchan->vc.lock, flag); | |
645 | if (bchan->curr_txd) { | |
646 | list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued); | |
647 | bchan->curr_txd = NULL; | |
648 | } | |
649 | ||
650 | vchan_get_all_descriptors(&bchan->vc, &head); | |
651 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | |
652 | ||
653 | vchan_dma_desc_free_list(&bchan->vc, &head); | |
62ec8eb5 MR |
654 | |
655 | return 0; | |
e7c0fe2a AG |
656 | } |
657 | ||
658 | /** | |
62ec8eb5 | 659 | * bam_pause - Pause DMA channel |
e7c0fe2a | 660 | * @chan: dma channel |
e7c0fe2a | 661 | * |
62ec8eb5 MR |
662 | */ |
663 | static int bam_pause(struct dma_chan *chan) | |
664 | { | |
665 | struct bam_chan *bchan = to_bam_chan(chan); | |
666 | struct bam_device *bdev = bchan->bdev; | |
667 | unsigned long flag; | |
668 | ||
669 | spin_lock_irqsave(&bchan->vc.lock, flag); | |
670 | writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); | |
671 | bchan->paused = 1; | |
672 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | |
673 | ||
674 | return 0; | |
675 | } | |
676 | ||
677 | /** | |
678 | * bam_resume - Resume DMA channel operations | |
679 | * @chan: dma channel | |
e7c0fe2a AG |
680 | * |
681 | */ | |
62ec8eb5 | 682 | static int bam_resume(struct dma_chan *chan) |
e7c0fe2a AG |
683 | { |
684 | struct bam_chan *bchan = to_bam_chan(chan); | |
685 | struct bam_device *bdev = bchan->bdev; | |
e7c0fe2a AG |
686 | unsigned long flag; |
687 | ||
62ec8eb5 MR |
688 | spin_lock_irqsave(&bchan->vc.lock, flag); |
689 | writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); | |
690 | bchan->paused = 0; | |
691 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | |
e7c0fe2a | 692 | |
62ec8eb5 | 693 | return 0; |
e7c0fe2a AG |
694 | } |
695 | ||
/**
 * process_channel_irqs - processes the channel interrupts
 * @bdev: bam controller
 *
 * This function processes the channel interrupts: for every pipe with a
 * pending interrupt it acknowledges the pipe irq, advances the active
 * transaction's bookkeeping, and either completes the cookie or re-queues
 * the transaction for the tasklet to restart.
 *
 * Return: the raw BAM_IRQ_SRCS_EE value read (pipe bits plus BAM_IRQ).
 */
static u32 process_channel_irqs(struct bam_device *bdev)
{
	u32 i, srcs, pipe_stts;
	unsigned long flags;
	struct bam_async_desc *async_desc;

	srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));

	/* return early if no pipe/channel interrupts are present */
	if (!(srcs & P_IRQ))
		return srcs;

	for (i = 0; i < bdev->num_channels; i++) {
		struct bam_chan *bchan = &bdev->channels[i];

		if (!(srcs & BIT(i)))
			continue;

		/* clear pipe irq by writing the status bits back to IRQ_CLR */
		pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));

		writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));

		spin_lock_irqsave(&bchan->vc.lock, flags);
		async_desc = bchan->curr_txd;

		if (async_desc) {
			/* this burst of xfer_len descriptors has completed */
			async_desc->num_desc -= async_desc->xfer_len;
			async_desc->curr_desc += async_desc->xfer_len;
			bchan->curr_txd = NULL;

			/* manage FIFO: advance head past consumed entries */
			bchan->head += async_desc->xfer_len;
			bchan->head %= MAX_DESCRIPTORS;

			/*
			 * if complete, process cookie. Otherwise
			 * push back to front of desc_issued so that
			 * it gets restarted by the tasklet
			 */
			if (!async_desc->num_desc)
				vchan_cookie_complete(&async_desc->vd);
			else
				list_add(&async_desc->vd.node,
					&bchan->vc.desc_issued);
		}

		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}

	return srcs;
}
755 | ||
756 | /** | |
757 | * bam_dma_irq - irq handler for bam controller | |
758 | * @irq: IRQ of interrupt | |
759 | * @data: callback data | |
760 | * | |
761 | * IRQ handler for the bam controller | |
762 | */ | |
763 | static irqreturn_t bam_dma_irq(int irq, void *data) | |
764 | { | |
765 | struct bam_device *bdev = data; | |
766 | u32 clr_mask = 0, srcs = 0; | |
767 | ||
768 | srcs |= process_channel_irqs(bdev); | |
769 | ||
770 | /* kick off tasklet to start next dma transfer */ | |
771 | if (srcs & P_IRQ) | |
772 | tasklet_schedule(&bdev->task); | |
773 | ||
774 | if (srcs & BAM_IRQ) | |
fb93f520 | 775 | clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); |
e7c0fe2a AG |
776 | |
777 | /* don't allow reorder of the various accesses to the BAM registers */ | |
778 | mb(); | |
779 | ||
fb93f520 | 780 | writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); |
e7c0fe2a AG |
781 | |
782 | return IRQ_HANDLED; | |
783 | } | |
784 | ||
/**
 * bam_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction. For an in-flight transaction the
 * residue is the sum of the sizes of the hw descriptors not yet consumed
 * (curr_desc already points past the completed ones).
 */
static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct virt_dma_desc *vd;
	int ret;
	size_t residue = 0;
	unsigned int i;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/* without a txstate there is nowhere to report residue */
	if (!txstate)
		return bchan->paused ? DMA_PAUSED : ret;

	spin_lock_irqsave(&bchan->vc.lock, flags);
	vd = vchan_find_desc(&bchan->vc, cookie);
	if (vd)
		/* still queued: nothing transferred, full length remains */
		residue = container_of(vd, struct bam_async_desc, vd)->length;
	else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
		for (i = 0; i < bchan->curr_txd->num_desc; i++)
			residue += bchan->curr_txd->curr_desc[i].size;

	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	if (ret == DMA_IN_PROGRESS && bchan->paused)
		ret = DMA_PAUSED;

	return ret;
}
827 | ||
828 | /** | |
829 | * bam_apply_new_config | |
830 | * @bchan: bam dma channel | |
831 | * @dir: DMA direction | |
832 | */ | |
833 | static void bam_apply_new_config(struct bam_chan *bchan, | |
834 | enum dma_transfer_direction dir) | |
835 | { | |
836 | struct bam_device *bdev = bchan->bdev; | |
837 | u32 maxburst; | |
838 | ||
839 | if (dir == DMA_DEV_TO_MEM) | |
840 | maxburst = bchan->slave.src_maxburst; | |
841 | else | |
842 | maxburst = bchan->slave.dst_maxburst; | |
843 | ||
fb93f520 | 844 | writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); |
e7c0fe2a AG |
845 | |
846 | bchan->reconfigure = 0; | |
847 | } | |
848 | ||
/**
 * bam_start_dma - start next transaction
 * @bchan: bam dma channel
 *
 * Pops the next issued descriptor, copies up to MAX_DESCRIPTORS hw
 * descriptors into the circular FIFO (handling wrap-around), and writes
 * the new tail offset to P_EVNT_REG to kick the hardware.
 * Caller must hold bchan->vc.lock.
 */
static void bam_start_dma(struct bam_chan *bchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct bam_desc_hw *desc;
	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
					sizeof(struct bam_desc_hw));

	lockdep_assert_held(&bchan->vc.lock);

	if (!vd)
		return;

	list_del(&vd->node);

	async_desc = container_of(vd, struct bam_async_desc, vd);
	bchan->curr_txd = async_desc;

	/* on first use, initialize the channel hardware */
	if (!bchan->initialized)
		bam_chan_init_hw(bchan, async_desc->dir);

	/* apply new slave config changes, if necessary */
	if (bchan->reconfigure)
		bam_apply_new_config(bchan, async_desc->dir);

	desc = bchan->curr_txd->curr_desc;

	/* a transaction larger than the FIFO is submitted in bursts */
	if (async_desc->num_desc > MAX_DESCRIPTORS)
		async_desc->xfer_len = MAX_DESCRIPTORS;
	else
		async_desc->xfer_len = async_desc->num_desc;

	/*
	 * set any special flags on the last descriptor; a partial burst
	 * instead forces an INT so the next burst can be restarted
	 */
	if (async_desc->num_desc == async_desc->xfer_len)
		desc[async_desc->xfer_len - 1].flags = async_desc->flags;
	else
		desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;

	/* copy into the FIFO, splitting at the wrap point when needed */
	if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
		u32 partial = MAX_DESCRIPTORS - bchan->tail;

		memcpy(&fifo[bchan->tail], desc,
				partial * sizeof(struct bam_desc_hw));
		memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
				sizeof(struct bam_desc_hw));
	} else {
		memcpy(&fifo[bchan->tail], desc,
				async_desc->xfer_len * sizeof(struct bam_desc_hw));
	}

	bchan->tail += async_desc->xfer_len;
	bchan->tail %= MAX_DESCRIPTORS;

	/* ensure descriptor writes and dma start not reordered */
	wmb();
	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
}
913 | ||
914 | /** | |
915 | * dma_tasklet - DMA IRQ tasklet | |
916 | * @data: tasklet argument (bam controller structure) | |
917 | * | |
918 | * Sets up next DMA operation and then processes all completed transactions | |
919 | */ | |
920 | static void dma_tasklet(unsigned long data) | |
921 | { | |
922 | struct bam_device *bdev = (struct bam_device *)data; | |
923 | struct bam_chan *bchan; | |
924 | unsigned long flags; | |
925 | unsigned int i; | |
926 | ||
927 | /* go through the channels and kick off transactions */ | |
928 | for (i = 0; i < bdev->num_channels; i++) { | |
929 | bchan = &bdev->channels[i]; | |
930 | spin_lock_irqsave(&bchan->vc.lock, flags); | |
931 | ||
932 | if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd) | |
933 | bam_start_dma(bchan); | |
934 | spin_unlock_irqrestore(&bchan->vc.lock, flags); | |
935 | } | |
936 | } | |
937 | ||
/**
 * bam_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Moves submitted descriptors to the issued list and, if the channel
 * has no transaction in flight, starts the next one directly under the
 * channel lock (no tasklet involved on this path).
 */
static void bam_issue_pending(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&bchan->vc.lock, flags);

	/* if work pending and idle, start a transaction */
	if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
		bam_start_dma(bchan);

	spin_unlock_irqrestore(&bchan->vc.lock, flags);
}
957 | ||
958 | /** | |
959 | * bam_dma_free_desc - free descriptor memory | |
960 | * @vd: virtual descriptor | |
961 | * | |
962 | */ | |
963 | static void bam_dma_free_desc(struct virt_dma_desc *vd) | |
964 | { | |
965 | struct bam_async_desc *async_desc = container_of(vd, | |
966 | struct bam_async_desc, vd); | |
967 | ||
968 | kfree(async_desc); | |
969 | } | |
970 | ||
971 | static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec, | |
972 | struct of_dma *of) | |
973 | { | |
974 | struct bam_device *bdev = container_of(of->of_dma_data, | |
975 | struct bam_device, common); | |
976 | unsigned int request; | |
977 | ||
978 | if (dma_spec->args_count != 1) | |
979 | return NULL; | |
980 | ||
981 | request = dma_spec->args[0]; | |
982 | if (request >= bdev->num_channels) | |
983 | return NULL; | |
984 | ||
985 | return dma_get_slave_channel(&(bdev->channels[request].vc.chan)); | |
986 | } | |
987 | ||
/**
 * bam_init
 * @bdev: bam device
 *
 * Initialization helper for global bam registers: validates the
 * configured execution environment, discovers the pipe count, resets
 * and enables the BAM block, and unmasks the global interrupt.
 *
 * Return: 0 on success, -EINVAL if the configured EE is out of range.
 */
static int bam_init(struct bam_device *bdev)
{
	u32 val;

	/* read revision and configuration information */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
	val &= NUM_EES_MASK;

	/* check that configured EE is within range */
	if (bdev->ee >= val)
		return -EINVAL;

	val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
	bdev->num_channels = val & BAM_NUM_PIPES_MASK;

	/* s/w reset bam */
	/* after reset all pipes are disabled and idle */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
	val |= BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
	val &= ~BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

	/* make sure previous stores are visible before enabling BAM */
	wmb();

	/* enable bam */
	val |= BAM_EN;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

	/* set descriptor threshold, start with 4 bytes */
	writel_relaxed(DEFAULT_CNT_THRSHLD,
			bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

	/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));

	/* enable irqs for errors */
	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
			bam_addr(bdev, 0, BAM_IRQ_EN));

	/* unmask global bam interrupt */
	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	return 0;
}
1040 | ||
1041 | static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan, | |
1042 | u32 index) | |
1043 | { | |
1044 | bchan->id = index; | |
1045 | bchan->bdev = bdev; | |
1046 | ||
1047 | vchan_init(&bchan->vc, &bdev->common); | |
1048 | bchan->vc.desc_free = bam_dma_free_desc; | |
1049 | } | |
1050 | ||
f43669de AT |
1051 | static const struct of_device_id bam_of_match[] = { |
1052 | { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info }, | |
1053 | { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info }, | |
1054 | {} | |
1055 | }; | |
1056 | ||
1057 | MODULE_DEVICE_TABLE(of, bam_of_match); | |
1058 | ||
e7c0fe2a AG |
1059 | static int bam_dma_probe(struct platform_device *pdev) |
1060 | { | |
1061 | struct bam_device *bdev; | |
f43669de | 1062 | const struct of_device_id *match; |
e7c0fe2a AG |
1063 | struct resource *iores; |
1064 | int ret, i; | |
1065 | ||
1066 | bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL); | |
1067 | if (!bdev) | |
1068 | return -ENOMEM; | |
1069 | ||
1070 | bdev->dev = &pdev->dev; | |
1071 | ||
f43669de AT |
1072 | match = of_match_node(bam_of_match, pdev->dev.of_node); |
1073 | if (!match) { | |
1074 | dev_err(&pdev->dev, "Unsupported BAM module\n"); | |
1075 | return -ENODEV; | |
1076 | } | |
1077 | ||
1078 | bdev->layout = match->data; | |
1079 | ||
e7c0fe2a AG |
1080 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1081 | bdev->regs = devm_ioremap_resource(&pdev->dev, iores); | |
1082 | if (IS_ERR(bdev->regs)) | |
1083 | return PTR_ERR(bdev->regs); | |
1084 | ||
1085 | bdev->irq = platform_get_irq(pdev, 0); | |
1086 | if (bdev->irq < 0) | |
1087 | return bdev->irq; | |
1088 | ||
1089 | ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee); | |
1090 | if (ret) { | |
1091 | dev_err(bdev->dev, "Execution environment unspecified\n"); | |
1092 | return ret; | |
1093 | } | |
1094 | ||
1095 | bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); | |
1096 | if (IS_ERR(bdev->bamclk)) | |
1097 | return PTR_ERR(bdev->bamclk); | |
1098 | ||
1099 | ret = clk_prepare_enable(bdev->bamclk); | |
1100 | if (ret) { | |
1101 | dev_err(bdev->dev, "failed to prepare/enable clock\n"); | |
1102 | return ret; | |
1103 | } | |
1104 | ||
1105 | ret = bam_init(bdev); | |
1106 | if (ret) | |
1107 | goto err_disable_clk; | |
1108 | ||
1109 | tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev); | |
1110 | ||
1111 | bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels, | |
1112 | sizeof(*bdev->channels), GFP_KERNEL); | |
1113 | ||
1114 | if (!bdev->channels) { | |
1115 | ret = -ENOMEM; | |
1116 | goto err_disable_clk; | |
1117 | } | |
1118 | ||
1119 | /* allocate and initialize channels */ | |
1120 | INIT_LIST_HEAD(&bdev->common.channels); | |
1121 | ||
1122 | for (i = 0; i < bdev->num_channels; i++) | |
1123 | bam_channel_init(bdev, &bdev->channels[i], i); | |
1124 | ||
1125 | ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq, | |
1126 | IRQF_TRIGGER_HIGH, "bam_dma", bdev); | |
1127 | if (ret) | |
1128 | goto err_disable_clk; | |
1129 | ||
1130 | /* set max dma segment size */ | |
1131 | bdev->common.dev = bdev->dev; | |
1132 | bdev->common.dev->dma_parms = &bdev->dma_parms; | |
1133 | ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE); | |
1134 | if (ret) { | |
1135 | dev_err(bdev->dev, "cannot set maximum segment size\n"); | |
1136 | goto err_disable_clk; | |
1137 | } | |
1138 | ||
1139 | platform_set_drvdata(pdev, bdev); | |
1140 | ||
1141 | /* set capabilities */ | |
1142 | dma_cap_zero(bdev->common.cap_mask); | |
1143 | dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); | |
1144 | ||
1145 | /* initialize dmaengine apis */ | |
1146 | bdev->common.device_alloc_chan_resources = bam_alloc_chan; | |
1147 | bdev->common.device_free_chan_resources = bam_free_chan; | |
1148 | bdev->common.device_prep_slave_sg = bam_prep_slave_sg; | |
62ec8eb5 MR |
1149 | bdev->common.device_config = bam_slave_config; |
1150 | bdev->common.device_pause = bam_pause; | |
1151 | bdev->common.device_resume = bam_resume; | |
1152 | bdev->common.device_terminate_all = bam_dma_terminate_all; | |
e7c0fe2a AG |
1153 | bdev->common.device_issue_pending = bam_issue_pending; |
1154 | bdev->common.device_tx_status = bam_tx_status; | |
1155 | bdev->common.dev = bdev->dev; | |
1156 | ||
1157 | ret = dma_async_device_register(&bdev->common); | |
1158 | if (ret) { | |
1159 | dev_err(bdev->dev, "failed to register dma async device\n"); | |
1160 | goto err_disable_clk; | |
1161 | } | |
1162 | ||
1163 | ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate, | |
1164 | &bdev->common); | |
1165 | if (ret) | |
1166 | goto err_unregister_dma; | |
1167 | ||
1168 | return 0; | |
1169 | ||
1170 | err_unregister_dma: | |
1171 | dma_async_device_unregister(&bdev->common); | |
1172 | err_disable_clk: | |
1173 | clk_disable_unprepare(bdev->bamclk); | |
1174 | return ret; | |
1175 | } | |
1176 | ||
/*
 * bam_dma_remove - tear down the controller.
 * Order matters: unregister from the frameworks, mask and free the IRQ
 * so no new work arrives, then quiesce each channel before freeing its
 * descriptor FIFO, and finally stop the device tasklet and the clock.
 */
static int bam_dma_remove(struct platform_device *pdev)
{
	struct bam_device *bdev = platform_get_drvdata(pdev);
	u32 i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&bdev->common);

	/* mask all interrupts for this execution environment */
	writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	/* free the irq explicitly so the handler cannot run during teardown */
	devm_free_irq(bdev->dev, bdev->irq, bdev);

	for (i = 0; i < bdev->num_channels; i++) {
		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
		tasklet_kill(&bdev->channels[i].vc.task);

		dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
			bdev->channels[i].fifo_virt,
			bdev->channels[i].fifo_phys);
	}

	tasklet_kill(&bdev->task);

	clk_disable_unprepare(bdev->bamclk);

	return 0;
}
1205 | ||
e7c0fe2a AG |
/* platform driver glue; matched either by name or by the OF table above */
static struct platform_driver bam_dma_driver = {
	.probe = bam_dma_probe,
	.remove = bam_dma_remove,
	.driver = {
		.name = "bam-dma-engine",
		.of_match_table = bam_of_match,
	},
};

module_platform_driver(bam_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
MODULE_LICENSE("GPL v2");