/*
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * QCOM BAM DMA engine driver
 *
 * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
 * peripherals on the MSM 8x74. The configuration of the channels is
 * dependent on the way they are hard wired to that specific peripheral.
 * The peripheral device tree entries specify the configuration of each
 * channel.
 *
 * The DMA controller requires the use of external memory for storage of
 * the hardware descriptors for each channel. The descriptor FIFO is
 * accessed as a circular buffer and operations are managed according to
 * the offset within the FIFO. After pipe/channel reset, all of the pipe
 * registers and internal state are back to defaults.
 *
 * During DMA operations, we write descriptors to the FIFO, being careful
 * to handle wrapping, and then write the last FIFO offset to that
 * channel's P_EVNT_REG register to kick off the transaction. The
 * P_SW_OFSTS register indicates the current FIFO offset that is being
 * processed, so there is some indication of where the hardware is
 * currently working.
 */
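
/*
 * Illustrative client-side usage (a sketch, not part of this driver):
 * a peripheral driver consuming one of these channels goes through the
 * generic dmaengine slave API, roughly:
 *
 *	struct dma_slave_config cfg = { .direction = DMA_DEV_TO_MEM,
 *					.src_maxburst = 4 };
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * which lands in bam_control(), bam_prep_slave_sg() and
 * bam_issue_pending() below.
 */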

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>

#include "dmaengine.h"
#include "virt-dma.h"

struct bam_desc_hw {
	u32 addr;		/* Buffer physical address */
	u16 size;		/* Buffer size in bytes */
	u16 flags;
};

#define DESC_FLAG_INT	BIT(15)
#define DESC_FLAG_EOT	BIT(14)
#define DESC_FLAG_EOB	BIT(13)
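
/*
 * These flag bits live in bam_desc_hw.flags. This driver only sets
 * DESC_FLAG_INT (raise a pipe interrupt once the descriptor has been
 * processed); DESC_FLAG_EOT and DESC_FLAG_EOB (end of transfer / end
 * of block) are defined but not set here.
 */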

struct bam_async_desc {
	struct virt_dma_desc vd;

	u32 num_desc;
	u32 xfer_len;
	struct bam_desc_hw *curr_desc;

	enum dma_transfer_direction dir;
	size_t length;
	struct bam_desc_hw desc[0];
};
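
/*
 * Note: desc[] is a variable-length tail. bam_prep_slave_sg() below
 * allocates sizeof(struct bam_async_desc) plus num_desc hardware
 * descriptors in a single kzalloc(), so the descriptors follow the
 * bookkeeping fields directly; curr_desc and num_desc track how much
 * of the transaction has been pushed to the FIFO so far.
 */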

#define BAM_CTRL			0x0000
#define BAM_REVISION			0x0004
#define BAM_SW_REVISION			0x0080
#define BAM_NUM_PIPES			0x003C
#define BAM_TIMER			0x0040
#define BAM_TIMER_CTRL			0x0044
#define BAM_DESC_CNT_TRSHLD		0x0008
#define BAM_IRQ_SRCS			0x000C
#define BAM_IRQ_SRCS_MSK		0x0010
#define BAM_IRQ_SRCS_UNMASKED		0x0030
#define BAM_IRQ_STTS			0x0014
#define BAM_IRQ_CLR			0x0018
#define BAM_IRQ_EN			0x001C
#define BAM_CNFG_BITS			0x007C
#define BAM_IRQ_SRCS_EE(ee)		(0x0800 + ((ee) * 0x80))
#define BAM_IRQ_SRCS_MSK_EE(ee)		(0x0804 + ((ee) * 0x80))
#define BAM_P_CTRL(pipe)		(0x1000 + ((pipe) * 0x1000))
#define BAM_P_RST(pipe)			(0x1004 + ((pipe) * 0x1000))
#define BAM_P_HALT(pipe)		(0x1008 + ((pipe) * 0x1000))
#define BAM_P_IRQ_STTS(pipe)		(0x1010 + ((pipe) * 0x1000))
#define BAM_P_IRQ_CLR(pipe)		(0x1014 + ((pipe) * 0x1000))
#define BAM_P_IRQ_EN(pipe)		(0x1018 + ((pipe) * 0x1000))
#define BAM_P_EVNT_DEST_ADDR(pipe)	(0x182C + ((pipe) * 0x1000))
#define BAM_P_EVNT_REG(pipe)		(0x1818 + ((pipe) * 0x1000))
#define BAM_P_SW_OFSTS(pipe)		(0x1800 + ((pipe) * 0x1000))
#define BAM_P_DATA_FIFO_ADDR(pipe)	(0x1824 + ((pipe) * 0x1000))
#define BAM_P_DESC_FIFO_ADDR(pipe)	(0x181C + ((pipe) * 0x1000))
#define BAM_P_EVNT_TRSHLD(pipe)		(0x1828 + ((pipe) * 0x1000))
#define BAM_P_FIFO_SIZES(pipe)		(0x1820 + ((pipe) * 0x1000))
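
/*
 * Per-pipe registers are banked at a 0x1000 stride, so for example
 * BAM_P_CTRL(0) is 0x1000 and BAM_P_CTRL(3) is 0x4000; the same
 * accessors serve every channel given its pipe number.
 */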

/* BAM CTRL */
#define BAM_SW_RST			BIT(0)
#define BAM_EN				BIT(1)
#define BAM_EN_ACCUM			BIT(4)
#define BAM_TESTBUS_SEL_SHIFT		5
#define BAM_TESTBUS_SEL_MASK		0x3F
#define BAM_DESC_CACHE_SEL_SHIFT	13
#define BAM_DESC_CACHE_SEL_MASK		0x3
#define BAM_CACHED_DESC_STORE		BIT(15)
#define IBC_DISABLE			BIT(16)

/* BAM REVISION */
#define REVISION_SHIFT			0
#define REVISION_MASK			0xFF
#define NUM_EES_SHIFT			8
#define NUM_EES_MASK			0xF
#define CE_BUFFER_SIZE			BIT(13)
#define AXI_ACTIVE			BIT(14)
#define USE_VMIDMT			BIT(15)
#define SECURED				BIT(16)
#define BAM_HAS_NO_BYPASS		BIT(17)
#define HIGH_FREQUENCY_BAM		BIT(18)
#define INACTIV_TMRS_EXST		BIT(19)
#define NUM_INACTIV_TMRS		BIT(20)
#define DESC_CACHE_DEPTH_SHIFT		21
#define DESC_CACHE_DEPTH_1		(0 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_2		(1 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_3		(2 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_4		(3 << DESC_CACHE_DEPTH_SHIFT)
#define CMD_DESC_EN			BIT(23)
#define INACTIV_TMR_BASE_SHIFT		24
#define INACTIV_TMR_BASE_MASK		0xFF

/* BAM NUM PIPES */
#define BAM_NUM_PIPES_SHIFT		0
#define BAM_NUM_PIPES_MASK		0xFF
#define PERIPH_NON_PIPE_GRP_SHIFT	16
#define PERIPH_NON_PIP_GRP_MASK		0xFF
#define BAM_NON_PIPE_GRP_SHIFT		24
#define BAM_NON_PIPE_GRP_MASK		0xFF

/* BAM CNFG BITS */
#define BAM_PIPE_CNFG			BIT(2)
#define BAM_FULL_PIPE			BIT(11)
#define BAM_NO_EXT_P_RST		BIT(12)
#define BAM_IBC_DISABLE			BIT(13)
#define BAM_SB_CLK_REQ			BIT(14)
#define BAM_PSM_CSW_REQ			BIT(15)
#define BAM_PSM_P_RES			BIT(16)
#define BAM_AU_P_RES			BIT(17)
#define BAM_SI_P_RES			BIT(18)
#define BAM_WB_P_RES			BIT(19)
#define BAM_WB_BLK_CSW			BIT(20)
#define BAM_WB_CSW_ACK_IDL		BIT(21)
#define BAM_WB_RETR_SVPNT		BIT(22)
#define BAM_WB_DSC_AVL_P_RST		BIT(23)
#define BAM_REG_P_EN			BIT(24)
#define BAM_PSM_P_HD_DATA		BIT(25)
#define BAM_AU_ACCUMED			BIT(26)
#define BAM_CMD_ENABLE			BIT(27)

#define BAM_CNFG_BITS_DEFAULT	(BAM_PIPE_CNFG |	\
				 BAM_NO_EXT_P_RST |	\
				 BAM_IBC_DISABLE |	\
				 BAM_SB_CLK_REQ |	\
				 BAM_PSM_CSW_REQ |	\
				 BAM_PSM_P_RES |	\
				 BAM_AU_P_RES |		\
				 BAM_SI_P_RES |		\
				 BAM_WB_P_RES |		\
				 BAM_WB_BLK_CSW |	\
				 BAM_WB_CSW_ACK_IDL |	\
				 BAM_WB_RETR_SVPNT |	\
				 BAM_WB_DSC_AVL_P_RST |	\
				 BAM_REG_P_EN |		\
				 BAM_PSM_P_HD_DATA |	\
				 BAM_AU_ACCUMED |	\
				 BAM_CMD_ENABLE)

/* PIPE CTRL */
#define P_EN			BIT(1)
#define P_DIRECTION		BIT(3)
#define P_SYS_STRM		BIT(4)
#define P_SYS_MODE		BIT(5)
#define P_AUTO_EOB		BIT(6)
#define P_AUTO_EOB_SEL_SHIFT	7
#define P_AUTO_EOB_SEL_512	(0 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_256	(1 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_128	(2 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_64	(3 << P_AUTO_EOB_SEL_SHIFT)
#define P_PREFETCH_LIMIT_SHIFT	9
#define P_PREFETCH_LIMIT_32	(0 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_16	(1 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_4	(2 << P_PREFETCH_LIMIT_SHIFT)
#define P_WRITE_NWD		BIT(11)
#define P_LOCK_GROUP_SHIFT	16
#define P_LOCK_GROUP_MASK	0x1F

/* BAM_DESC_CNT_TRSHLD */
#define CNT_TRSHLD		0xffff
#define DEFAULT_CNT_THRSHLD	0x4

/* BAM_IRQ_SRCS */
#define BAM_IRQ			BIT(31)
#define P_IRQ			0x7fffffff

/* BAM_IRQ_SRCS_MSK */
#define BAM_IRQ_MSK		BAM_IRQ
#define P_IRQ_MSK		P_IRQ

/* BAM_IRQ_STTS */
#define BAM_TIMER_IRQ		BIT(4)
#define BAM_EMPTY_IRQ		BIT(3)
#define BAM_ERROR_IRQ		BIT(2)
#define BAM_HRESP_ERR_IRQ	BIT(1)

/* BAM_IRQ_CLR */
#define BAM_TIMER_CLR		BIT(4)
#define BAM_EMPTY_CLR		BIT(3)
#define BAM_ERROR_CLR		BIT(2)
#define BAM_HRESP_ERR_CLR	BIT(1)

/* BAM_IRQ_EN */
#define BAM_TIMER_EN		BIT(4)
#define BAM_EMPTY_EN		BIT(3)
#define BAM_ERROR_EN		BIT(2)
#define BAM_HRESP_ERR_EN	BIT(1)

/* BAM_P_IRQ_EN */
#define P_PRCSD_DESC_EN		BIT(0)
#define P_TIMER_EN		BIT(1)
#define P_WAKE_EN		BIT(2)
#define P_OUT_OF_DESC_EN	BIT(3)
#define P_ERR_EN		BIT(4)
#define P_TRNSFR_END_EN		BIT(5)
#define P_DEFAULT_IRQS_EN	(P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)

/* BAM_P_SW_OFSTS */
#define P_SW_OFSTS_MASK		0xffff

#define BAM_DESC_FIFO_SIZE	SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_MAX_DATA_SIZE	(SZ_32K - 8)
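
/*
 * MAX_DESCRIPTORS leaves one 8-byte descriptor slot of the 32K FIFO
 * unused; bam_chan_init_hw() relies on that slack when it rounds the
 * FIFO base up to an 8-byte boundary. BAM_MAX_DATA_SIZE caps a single
 * descriptor's buffer length, so bam_prep_slave_sg() splits larger
 * scatterlist entries across multiple descriptors.
 */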

struct bam_chan {
	struct virt_dma_chan vc;

	struct bam_device *bdev;

	/* configuration from device tree */
	u32 id;

	struct bam_async_desc *curr_txd;	/* current running dma */

	/* runtime configuration */
	struct dma_slave_config slave;

	/* fifo storage */
	struct bam_desc_hw *fifo_virt;
	dma_addr_t fifo_phys;

	/* fifo markers */
	unsigned short head;		/* start of active descriptor entries */
	unsigned short tail;		/* end of active descriptor entries */

	unsigned int initialized;	/* is the channel hw initialized? */
	unsigned int paused;		/* is the channel paused? */
	unsigned int reconfigure;	/* new slave config? */

	struct list_head node;
};
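
/*
 * head and tail form a ring of MAX_DESCRIPTORS entries over fifo_virt:
 * bam_start_dma() advances tail as descriptors are handed to hardware
 * and process_channel_irqs() advances head as they complete, both
 * wrapping modulo MAX_DESCRIPTORS.
 */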

static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
{
	return container_of(common, struct bam_chan, vc.chan);
}

struct bam_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct bam_chan *channels;
	u32 num_channels;

	/* execution environment ID, from DT */
	u32 ee;

	struct clk *bamclk;
	int irq;

	/* dma start transaction tasklet */
	struct tasklet_struct task;
};

/**
 * bam_reset_channel - Reset individual BAM DMA channel
 * @bchan: bam channel
 *
 * This function resets a specific BAM channel
 */
static void bam_reset_channel(struct bam_chan *bchan)
{
	struct bam_device *bdev = bchan->bdev;

	lockdep_assert_held(&bchan->vc.lock);

	/* reset channel */
	writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id));
	writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id));

	/* don't allow cpu to reorder BAM register accesses done after this */
	wmb();

	/* make sure hw is initialized when channel is used the first time */
	bchan->initialized = 0;
}
/**
 * bam_chan_init_hw - Initialize channel hardware
 * @bchan: bam channel
 * @dir: DMA transfer direction
 *
 * This function resets and initializes the BAM channel
 */
static void bam_chan_init_hw(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 val;

	/* Reset the channel to clear internal state of the FIFO */
	bam_reset_channel(bchan);

	/*
	 * write out 8 byte aligned address. We have enough space for this
	 * because we allocated 1 more descriptor (8 bytes) than we can use
	 */
	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
			bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id));
	writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs +
			BAM_P_FIFO_SIZES(bchan->id));

	/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
	writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id));

	/* unmask the specific pipe and EE combo */
	val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
	val |= BIT(bchan->id);
	writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));

	/* don't allow cpu to reorder the channel enable done below */
	wmb();

	/* set fixed direction and mode, then enable channel */
	val = P_EN | P_SYS_MODE;
	if (dir == DMA_DEV_TO_MEM)
		val |= P_DIRECTION;

	writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id));

	bchan->initialized = 1;

	/* init FIFO pointers */
	bchan->head = 0;
	bchan->tail = 0;
}

/**
 * bam_alloc_chan - Allocate channel resources for DMA channel.
 * @chan: specified channel
 *
 * This function allocates the FIFO descriptor memory
 */
static int bam_alloc_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	if (bchan->fifo_virt)
		return 0;

	/* allocate FIFO descriptor space, but only if necessary */
	bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
				&bchan->fifo_phys, GFP_KERNEL);

	if (!bchan->fifo_virt) {
		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * bam_free_chan - Frees dma resources associated with specific channel
 * @chan: specified channel
 *
 * Free the allocated fifo descriptor memory and channel resources
 *
 */
static void bam_free_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	u32 val;
	unsigned long flags;

	vchan_free_chan_resources(to_virt_chan(chan));

	if (bchan->curr_txd) {
		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
		return;
	}

	spin_lock_irqsave(&bchan->vc.lock, flags);
	bam_reset_channel(bchan);
	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
				bchan->fifo_phys);
	bchan->fifo_virt = NULL;

	/* mask irq for pipe/channel */
	val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
	val &= ~BIT(bchan->id);
	writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));

	/* disable irq */
	writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id));
}
/**
 * bam_slave_config - set slave configuration for channel
 * @bchan: bam dma channel
 * @cfg: slave configuration
 *
 * Sets slave configuration for channel
 *
 */
static void bam_slave_config(struct bam_chan *bchan,
		struct dma_slave_config *cfg)
{
	memcpy(&bchan->slave, cfg, sizeof(*cfg));
	bchan->reconfigure = 1;
}

/**
 * bam_prep_slave_sg - Prep slave sg transaction
 *
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct scatterlist *sg;
	u32 i;
	struct bam_desc_hw *desc;
	unsigned int num_alloc = 0;

	if (!is_slave_direction(direction)) {
		dev_err(bdev->dev, "invalid dma direction\n");
		return NULL;
	}

	/* calculate number of required entries */
	for_each_sg(sgl, sg, sg_len, i)
		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);

	/* allocate enough room to accommodate the number of entries */
	async_desc = kzalloc(sizeof(*async_desc) +
			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);

	if (!async_desc)
		goto err_out;

	async_desc->num_desc = num_alloc;
	async_desc->curr_desc = async_desc->desc;
	async_desc->dir = direction;

	/* fill in temporary descriptors */
	desc = async_desc->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		unsigned int remainder = sg_dma_len(sg);
		unsigned int curr_offset = 0;

		do {
			desc->addr = sg_dma_address(sg) + curr_offset;

			if (remainder > BAM_MAX_DATA_SIZE) {
				desc->size = BAM_MAX_DATA_SIZE;
				remainder -= BAM_MAX_DATA_SIZE;
				curr_offset += BAM_MAX_DATA_SIZE;
			} else {
				desc->size = remainder;
				remainder = 0;
			}

			async_desc->length += desc->size;
			desc++;
		} while (remainder > 0);
	}

	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);

err_out:
	kfree(async_desc);
	return NULL;
}

/**
 * bam_dma_terminate_all - terminate all transactions on a channel
 * @bchan: bam dma channel
 *
 * Dequeues and frees all transactions
 * No callbacks are done
 *
 */
static void bam_dma_terminate_all(struct bam_chan *bchan)
{
	unsigned long flag;
	LIST_HEAD(head);

	/* remove all transactions, including active transaction */
	spin_lock_irqsave(&bchan->vc.lock, flag);
	if (bchan->curr_txd) {
		list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
		bchan->curr_txd = NULL;
	}

	vchan_get_all_descriptors(&bchan->vc, &head);
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	vchan_dma_desc_free_list(&bchan->vc, &head);
}

/**
 * bam_control - DMA device control
 * @chan: dma channel
 * @cmd: control cmd
 * @arg: cmd argument
 *
 * Perform DMA control command
 *
 */
static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	int ret = 0;
	unsigned long flag;

	switch (cmd) {
	case DMA_PAUSE:
		spin_lock_irqsave(&bchan->vc.lock, flag);
		writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id));
		bchan->paused = 1;
		spin_unlock_irqrestore(&bchan->vc.lock, flag);
		break;

	case DMA_RESUME:
		spin_lock_irqsave(&bchan->vc.lock, flag);
		writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id));
		bchan->paused = 0;
		spin_unlock_irqrestore(&bchan->vc.lock, flag);
		break;

	case DMA_TERMINATE_ALL:
		bam_dma_terminate_all(bchan);
		break;

	case DMA_SLAVE_CONFIG:
		spin_lock_irqsave(&bchan->vc.lock, flag);
		bam_slave_config(bchan, (struct dma_slave_config *)arg);
		spin_unlock_irqrestore(&bchan->vc.lock, flag);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

/**
 * process_channel_irqs - processes the channel interrupts
 * @bdev: bam controller
 *
 * This function processes the channel interrupts
 *
 */
static u32 process_channel_irqs(struct bam_device *bdev)
{
	u32 i, srcs, pipe_stts;
	unsigned long flags;
	struct bam_async_desc *async_desc;

	srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee));

	/* return early if no pipe/channel interrupts are present */
	if (!(srcs & P_IRQ))
		return srcs;

	for (i = 0; i < bdev->num_channels; i++) {
		struct bam_chan *bchan = &bdev->channels[i];

		if (!(srcs & BIT(i)))
			continue;

		/* clear pipe irq */
		pipe_stts = readl_relaxed(bdev->regs +
			BAM_P_IRQ_STTS(i));

		writel_relaxed(pipe_stts, bdev->regs +
				BAM_P_IRQ_CLR(i));

		spin_lock_irqsave(&bchan->vc.lock, flags);
		async_desc = bchan->curr_txd;

		if (async_desc) {
			async_desc->num_desc -= async_desc->xfer_len;
			async_desc->curr_desc += async_desc->xfer_len;
			bchan->curr_txd = NULL;

			/* manage FIFO */
			bchan->head += async_desc->xfer_len;
			bchan->head %= MAX_DESCRIPTORS;

			/*
			 * if complete, process cookie. Otherwise
			 * push back to front of desc_issued so that
			 * it gets restarted by the tasklet
			 */
			if (!async_desc->num_desc)
				vchan_cookie_complete(&async_desc->vd);
			else
				list_add(&async_desc->vd.node,
					&bchan->vc.desc_issued);
		}

		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}

	return srcs;
}

/**
 * bam_dma_irq - irq handler for bam controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the bam controller
 */
static irqreturn_t bam_dma_irq(int irq, void *data)
{
	struct bam_device *bdev = data;
	u32 clr_mask = 0, srcs = 0;

	srcs |= process_channel_irqs(bdev);

	/* kick off tasklet to start next dma transfer */
	if (srcs & P_IRQ)
		tasklet_schedule(&bdev->task);

	if (srcs & BAM_IRQ)
		clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS);

	/* don't allow reorder of the various accesses to the BAM registers */
	mb();

	writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR);

	return IRQ_HANDLED;
}

/**
 * bam_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct virt_dma_desc *vd;
	int ret;
	size_t residue = 0;
	unsigned int i;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return bchan->paused ? DMA_PAUSED : ret;

	spin_lock_irqsave(&bchan->vc.lock, flags);
	vd = vchan_find_desc(&bchan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct bam_async_desc, vd)->length;
	else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
		for (i = 0; i < bchan->curr_txd->num_desc; i++)
			residue += bchan->curr_txd->curr_desc[i].size;

	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	if (ret == DMA_IN_PROGRESS && bchan->paused)
		ret = DMA_PAUSED;

	return ret;
}

/**
 * bam_apply_new_config - apply any queued slave configuration
 * @bchan: bam dma channel
 * @dir: DMA direction
 */
static void bam_apply_new_config(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 maxburst;

	if (dir == DMA_DEV_TO_MEM)
		maxburst = bchan->slave.src_maxburst;
	else
		maxburst = bchan->slave.dst_maxburst;

	writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD);

	bchan->reconfigure = 0;
}

/**
 * bam_start_dma - start next transaction
 * @bchan: bam dma channel
 */
static void bam_start_dma(struct bam_chan *bchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct bam_desc_hw *desc;
	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
					sizeof(struct bam_desc_hw));

	lockdep_assert_held(&bchan->vc.lock);

	if (!vd)
		return;

	list_del(&vd->node);

	async_desc = container_of(vd, struct bam_async_desc, vd);
	bchan->curr_txd = async_desc;

	/* on first use, initialize the channel hardware */
	if (!bchan->initialized)
		bam_chan_init_hw(bchan, async_desc->dir);

	/* apply new slave config changes, if necessary */
	if (bchan->reconfigure)
		bam_apply_new_config(bchan, async_desc->dir);

	desc = bchan->curr_txd->curr_desc;

	if (async_desc->num_desc > MAX_DESCRIPTORS)
		async_desc->xfer_len = MAX_DESCRIPTORS;
	else
		async_desc->xfer_len = async_desc->num_desc;

	/* set INT on last descriptor */
	desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;

	if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
		u32 partial = MAX_DESCRIPTORS - bchan->tail;

		memcpy(&fifo[bchan->tail], desc,
				partial * sizeof(struct bam_desc_hw));
		memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
				sizeof(struct bam_desc_hw));
	} else {
		memcpy(&fifo[bchan->tail], desc,
			async_desc->xfer_len * sizeof(struct bam_desc_hw));
	}

	bchan->tail += async_desc->xfer_len;
	bchan->tail %= MAX_DESCRIPTORS;

	/* ensure descriptor writes and dma start not reordered */
	wmb();
	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
			bdev->regs + BAM_P_EVNT_REG(bchan->id));
}
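
/*
 * Worked example of the kick above: with 8-byte descriptors, advancing
 * tail from 10 to 14 writes 14 * 8 = 112 into P_EVNT_REG; the hardware
 * then consumes descriptors until the offset reported in P_SW_OFSTS
 * catches up with that byte offset (see the comment at the top of this
 * file).
 */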

/**
 * dma_tasklet - DMA IRQ tasklet
 * @data: tasklet argument (bam controller structure)
 *
 * Sets up next DMA operation and then processes all completed transactions
 */
static void dma_tasklet(unsigned long data)
{
	struct bam_device *bdev = (struct bam_device *)data;
	struct bam_chan *bchan;
	unsigned long flags;
	unsigned int i;

	/* go through the channels and kick off transactions */
	for (i = 0; i < bdev->num_channels; i++) {
		bchan = &bdev->channels[i];
		spin_lock_irqsave(&bchan->vc.lock, flags);

		if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
			bam_start_dma(bchan);
		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}
}

/**
 * bam_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Calls tasklet directly which in turn starts any pending transactions
 */
static void bam_issue_pending(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&bchan->vc.lock, flags);

	/* if work pending and idle, start a transaction */
	if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
		bam_start_dma(bchan);

	spin_unlock_irqrestore(&bchan->vc.lock, flags);
}

/**
 * bam_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 *
 */
static void bam_dma_free_desc(struct virt_dma_desc *vd)
{
	struct bam_async_desc *async_desc = container_of(vd,
			struct bam_async_desc, vd);

	kfree(async_desc);
}

static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *of)
{
	struct bam_device *bdev = container_of(of->of_dma_data,
					struct bam_device, common);
	unsigned int request;

	if (dma_spec->args_count != 1)
		return NULL;

	request = dma_spec->args[0];
	if (request >= bdev->num_channels)
		return NULL;

	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
}

/**
 * bam_init - initialization helper for global bam registers
 * @bdev: bam device
 */
static int bam_init(struct bam_device *bdev)
{
	u32 val;

	/* read revision and configuration information */
	val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT;
	val &= NUM_EES_MASK;

	/* check that configured EE is within range */
	if (bdev->ee >= val)
		return -EINVAL;

	val = readl_relaxed(bdev->regs + BAM_NUM_PIPES);
	bdev->num_channels = val & BAM_NUM_PIPES_MASK;

	/* s/w reset bam */
	/* after reset all pipes are disabled and idle */
	val = readl_relaxed(bdev->regs + BAM_CTRL);
	val |= BAM_SW_RST;
	writel_relaxed(val, bdev->regs + BAM_CTRL);
	val &= ~BAM_SW_RST;
	writel_relaxed(val, bdev->regs + BAM_CTRL);

	/* make sure previous stores are visible before enabling BAM */
	wmb();

	/* enable bam */
	val |= BAM_EN;
	writel_relaxed(val, bdev->regs + BAM_CTRL);

	/* set descriptor threshold, start with 4 bytes */
	writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD);

	/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS);

	/* enable irqs for errors */
	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
			bdev->regs + BAM_IRQ_EN);

	/* unmask global bam interrupt */
	writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));

	return 0;
}

static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
	u32 index)
{
	bchan->id = index;
	bchan->bdev = bdev;

	vchan_init(&bchan->vc, &bdev->common);
	bchan->vc.desc_free = bam_dma_free_desc;
}

static int bam_dma_probe(struct platform_device *pdev)
{
	struct bam_device *bdev;
	struct resource *iores;
	int ret, i;

	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->dev = &pdev->dev;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(bdev->regs))
		return PTR_ERR(bdev->regs);

	bdev->irq = platform_get_irq(pdev, 0);
	if (bdev->irq < 0)
		return bdev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
	if (ret) {
		dev_err(bdev->dev, "Execution environment unspecified\n");
		return ret;
	}

	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
	if (IS_ERR(bdev->bamclk))
		return PTR_ERR(bdev->bamclk);

	ret = clk_prepare_enable(bdev->bamclk);
	if (ret) {
		dev_err(bdev->dev, "failed to prepare/enable clock\n");
		return ret;
	}

	ret = bam_init(bdev);
	if (ret)
		goto err_disable_clk;

	tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);

	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
				sizeof(*bdev->channels), GFP_KERNEL);

	if (!bdev->channels) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&bdev->common.channels);

	for (i = 0; i < bdev->num_channels; i++)
		bam_channel_init(bdev, &bdev->channels[i], i);

	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
			IRQF_TRIGGER_HIGH, "bam_dma", bdev);
	if (ret)
		goto err_disable_clk;

	/* set max dma segment size */
	bdev->common.dev = bdev->dev;
	bdev->common.dev->dma_parms = &bdev->dma_parms;
	ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
	if (ret) {
		dev_err(bdev->dev, "cannot set maximum segment size\n");
		goto err_disable_clk;
	}

	platform_set_drvdata(pdev, bdev);

	/* set capabilities */
	dma_cap_zero(bdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);

	/* initialize dmaengine apis */
	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
	bdev->common.device_free_chan_resources = bam_free_chan;
	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
	bdev->common.device_control = bam_control;
	bdev->common.device_issue_pending = bam_issue_pending;
	bdev->common.device_tx_status = bam_tx_status;
	bdev->common.dev = bdev->dev;

	ret = dma_async_device_register(&bdev->common);
	if (ret) {
		dev_err(bdev->dev, "failed to register dma async device\n");
		goto err_disable_clk;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
					&bdev->common);
	if (ret)
		goto err_unregister_dma;

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&bdev->common);
err_disable_clk:
	clk_disable_unprepare(bdev->bamclk);
	return ret;
}

static int bam_dma_remove(struct platform_device *pdev)
{
	struct bam_device *bdev = platform_get_drvdata(pdev);
	u32 i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&bdev->common);

	/* mask all interrupts for this execution environment */
	writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));

	devm_free_irq(bdev->dev, bdev->irq, bdev);

	for (i = 0; i < bdev->num_channels; i++) {
		bam_dma_terminate_all(&bdev->channels[i]);
		tasklet_kill(&bdev->channels[i].vc.task);

		dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
			bdev->channels[i].fifo_virt,
			bdev->channels[i].fifo_phys);
	}

	tasklet_kill(&bdev->task);

	clk_disable_unprepare(bdev->bamclk);

	return 0;
}

static const struct of_device_id bam_of_match[] = {
	{ .compatible = "qcom,bam-v1.4.0", },
	{}
};
MODULE_DEVICE_TABLE(of, bam_of_match);

static struct platform_driver bam_dma_driver = {
	.probe = bam_dma_probe,
	.remove = bam_dma_remove,
	.driver = {
		.name = "bam-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = bam_of_match,
	},
};

module_platform_driver(bam_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
MODULE_LICENSE("GPL v2");