/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */
8 | ||
9 | #include <linux/module.h> | |
10 | #include <linux/dmaengine.h> | |
11 | #include <linux/dma-mapping.h> | |
12 | #include <linux/interrupt.h> | |
13 | #include <linux/io.h> | |
14 | #include <linux/slab.h> | |
15 | #include <linux/of_irq.h> | |
16 | #include <linux/of_address.h> | |
17 | #include <linux/of_device.h> | |
18 | #include <linux/of_platform.h> | |
19 | #include <linux/sirfsoc_dma.h> | |
20 | ||
949ff5b8 VK |
21 | #include "dmaengine.h" |
22 | ||
#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5

/* The xlen and dma_width registers count in units of 4 bytes */
#define SIRFSOC_DMA_WORD_LEN	4

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
};

#define DRV_NAME	"sirfsoc_dma"

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * The channel lock is already held by our callers, so we must not
	 * take it again here.
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active,
				&schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for a cyclic channel, desc is always in the active list */
			if (list_empty(&schan->active)) {
				/* no active cyclic DMA on this channel */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);
			if (!sdesc->cyclic) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
		~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
		& ~((1 << cid) | 1 << (cid + 16)),
		sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_lock_irqsave(&schan->lock, flags);
	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);
	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;

	spin_lock_irqsave(&schan->lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * The number of chunks in a frame can only be 1 for prima2, and
	 * ylen (the number of frames minus 1) must be at least 0.
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA: invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with exactly two periods.
	 * If the X-length is set to 0, the controller runs in loop mode:
	 * the DMA address keeps increasing until it reaches the end of a
	 * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
	 * then wraps back to the beginning of that area. In loop mode the
	 * DMA data region is divided into two parts, BUFA and BUFB, and
	 * the controller raises an interrupt twice per loop: once at the
	 * end of BUFA and once at the end of BUFB. For example, a 4 KiB
	 * buffer gives two 2 KiB periods with ylen = 4096 / 4 - 1 = 1023.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);

static int __devinit sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Failed to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}

static int __devexit sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	return 0;
}

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{},
};

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= __devexit_p(sirfsoc_dma_remove),
	.driver = {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

module_platform_driver(sirfsoc_dma_driver);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");