/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_INT_EN_CLR		0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR	0x15C

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5

/* the xlen and dma_width registers are counted in 4-byte words */
#define SIRFSOC_DMA_WORD_LEN		4

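/*
 * Register layout as used throughout this driver: each channel owns a
 * 0x10-byte window of ADDR/XLEN/YLEN/CTRL registers at (base + cid * 0x10),
 * and the per-channel width registers form an array starting at
 * SIRFSOC_DMA_WIDTH_0 (base + 0x100 + cid * 4). The remaining registers
 * (VALID, INT, INT_EN) carry one bit per channel, while LOOP_CTRL uses
 * two bits per channel, at cid and cid + 16.
 */
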
struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};
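
/*
 * Geometry note (inferred from sirfsoc_dma_prep_interleaved() below): xlen
 * counts the words transferred per line, width is the line-to-line pitch
 * in words (data plus inter-line gap), and ylen + 1 is the number of
 * lines, so a plain 1D transfer is simply one with a zero gap.
 */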

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};
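
/*
 * Descriptor lifecycle, as implemented by the routines below: descriptors
 * are preallocated onto "free", move to "prepared" in the prep callbacks,
 * to "queued" in tx_submit(), to "active" once started by
 * sirfsoc_dma_execute(), and to "completed" in the interrupt handler;
 * the tasklet finally runs the callbacks and returns them to "free".
 */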

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	bool				is_marco;
};

#define DRV_NAME	"sirfsoc_dma"

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}
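
/*
 * The container_of() above indexes channels[c->chan_id]; this relies on
 * probe() adding the channels to the dma_device list in array order, so
 * the chan_id assigned at registration time matches the array index.
 */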

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * The lock is already held by the callers of this function, so we
	 * don't take it again here.
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active,
				&schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}
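
/*
 * Note that the interrupt handler above only acknowledges the hardware
 * and does the list bookkeeping; completion callbacks are deferred to the
 * tasklet below, keeping interrupt-context work to a minimum and letting
 * callbacks safely prepare and submit new descriptors.
 */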

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for cyclic channel, desc is always in active list */
			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);

			if (!sdesc || !sdesc->cyclic) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

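	/*
	 * Only the burst size is programmable here: a 4-word maxburst
	 * presumably selects the controller's burst mode (mode bit 1),
	 * anything else single-word transfers; the bit is OR'ed into
	 * CH_CTRL by sirfsoc_dma_execute().
	 */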
	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco) {
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base +
				SIRFSOC_DMA_CH_LOOP_CTRL) &
			~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	} else {
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
	}

	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base +
				SIRFSOC_DMA_CH_LOOP_CTRL) &
			~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base +
				SIRFSOC_DMA_CH_LOOP_CTRL) |
			((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_PAUSE:
		return sirfsoc_dma_pause_chan(schan);
	case DMA_RESUME:
		return sirfsoc_dma_resume_chan(schan);
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n",
				i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}
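
/*
 * The pool allocated above is the only place descriptors are created: the
 * prep routines later recycle entries from the per-channel free list
 * instead of allocating, which lets them be called from atomic context.
 */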

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	struct sirfsoc_dma_desc *sdesc;
	int cid = schan->chan.chan_id;
	unsigned long dma_pos;
	unsigned long dma_request_bytes;
	unsigned long residue;

	spin_lock_irqsave(&schan->lock, flags);

	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
		node);
	dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
		(sdesc->width * SIRFSOC_DMA_WORD_LEN);

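	/*
	 * The channel ADDR register advances with the transfer and holds
	 * the current position as a word address; shift it left by 2 to
	 * get back to bytes before computing the residue.
	 */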
	ret = dma_cookie_status(chan, cookie, txstate);
	dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR)
		<< 2;
	residue = dma_request_bytes - (dma_pos - sdesc->addr);
	dma_set_residue(txstate, residue);

	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * The number of chunks in a frame can only be 1 for prima2, and
	 * ylen (number of frames - 1) must be at least 0
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
			SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with exactly two periods.
	 * If the X-length is set to 0, the controller runs in loop mode:
	 * the DMA address keeps increasing until it reaches the end of a
	 * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
	 * then wraps back to the beginning of that area. In loop mode the
	 * DMA data region is divided into two parts, BUFA and BUFB, and
	 * the controller raises an interrupt twice per loop: once when the
	 * DMA address reaches the end of BUFA and once at the end of BUFB.
	 */
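	/*
	 * Loop-mode buffer layout for buf_len == 2 * period_len (an
	 * interrupt fires each time the DMA address crosses a "^" mark):
	 *
	 *	addr               addr + period_len      addr + buf_len
	 *	|------- BUFA --------|------- BUFB --------|
	 *	                      ^                     ^
	 */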
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
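
/*
 * A minimal client-side sketch of how this filter is meant to be used,
 * assuming a peripheral wired to a (hypothetical) global channel number 4:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)4);
 *	if (!chan)
 *		return -ENODEV;
 */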

static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_device_is_compatible(dn, "sirf,marco-dmac"))
		sdma->is_marco = true;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	sdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(sdma->clk)) {
		dev_err(dev, "failed to get a clock.\n");
		return PTR_ERR(sdma->clk);
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	clk_prepare_enable(sdma->clk);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	clk_disable_unprepare(sdma->clk);
	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	return 0;
}

static const struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{ .compatible = "sirf,marco-dmac", },
	{},
};

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= sirfsoc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

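/*
 * Registering at subsys_initcall() time rather than module_init() makes
 * the DMA controller available before ordinary peripheral drivers probe,
 * since their probe routines may already request one of these channels.
 */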
static __init int sirfsoc_dma_init(void)
{
	return platform_driver_register(&sirfsoc_dma_driver);
}

static void __exit sirfsoc_dma_exit(void)
{
	platform_driver_unregister(&sirfsoc_dma_driver);
}

subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");