/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>

#define APB_DMA_GEN			0x000
#define GEN_ENABLE			(1<<31)

#define APB_DMA_CNTRL			0x010

#define APB_DMA_IRQ_MASK		0x01c

#define APB_DMA_IRQ_MASK_SET		0x020

#define APB_DMA_CHAN_CSR		0x000
#define CSR_ENB				(1<<31)
#define CSR_IE_EOC			(1<<30)
#define CSR_HOLD			(1<<29)
#define CSR_DIR				(1<<28)
#define CSR_ONCE			(1<<27)
#define CSR_FLOW			(1<<21)
#define CSR_REQ_SEL_SHIFT		16
#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT		2
#define CSR_WCOUNT_MASK			0xFFFC

#define APB_DMA_CHAN_STA		0x004
#define STA_BUSY			(1<<31)
#define STA_ISE_EOC			(1<<30)
#define STA_HALT			(1<<29)
#define STA_PING_PONG			(1<<28)
#define STA_COUNT_SHIFT			2
#define STA_COUNT_MASK			0xFFFC

#define APB_DMA_CHAN_AHB_PTR		0x010

#define APB_DMA_CHAN_AHB_SEQ		0x014
#define AHB_SEQ_INTR_ENB		(1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT		28
#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP		(1<<27)
#define AHB_SEQ_BURST_MASK		(0x7<<24)
#define AHB_SEQ_BURST_1			(4<<24)
#define AHB_SEQ_BURST_4			(5<<24)
#define AHB_SEQ_BURST_8			(6<<24)
#define AHB_SEQ_DBL_BUF			(1<<19)
#define AHB_SEQ_WRAP_SHIFT		16
#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR		0x018

#define APB_DMA_CHAN_APB_SEQ		0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT		28
#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP		(1<<27)
#define APB_SEQ_WRAP_SHIFT		16
#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)

#define TEGRA_SYSTEM_DMA_CH_NR		16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
#define TEGRA_SYSTEM_DMA_CH_MIN		0
#define TEGRA_SYSTEM_DMA_CH_MAX	\
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
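
/*
 * Channels TEGRA_SYSTEM_DMA_CH_MIN..TEGRA_SYSTEM_DMA_CH_MAX are the only
 * ones this driver programs; the remaining TEGRA_SYSTEM_DMA_AVP_CH_NUM
 * channels are left untouched (reserved, presumably for the AVP
 * coprocessor) and tegra_dma_init() marks them as permanently in use.
 */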

#define NV_DMA_MAX_TRANSFER_SIZE 0x10000

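/*
 * The SEQ registers encode address wrap and bus width as small field
 * indices.  These tables give each index its meaning: wrap boundaries in
 * 32-bit words (the byte values from the request are shifted right by two
 * before the lookup) and bus widths in bits.  tegra_dma_update_hw()
 * searches a table for the requested value and programs the matching
 * index into the register field.
 */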
static const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

static const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

static const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};

#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
	struct list_head	list;
	int			id;
	spinlock_t		lock;
	char			name[TEGRA_DMA_NAME_SIZE];
	void __iomem		*addr;
	int			mode;
	int			irq;

	/* Register shadow */
	u32			csr;
	u32			ahb_seq;
	u32			ahb_ptr;
	u32			apb_seq;
	u32			apb_ptr;
};
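
/*
 * The shadow fields above record what was last programmed into the channel
 * registers; tegra_dma_update_hw_partial() only refreshes the pointer
 * shadows and registers, leaving the CSR/SEQ configuration from the
 * previous full programming in effect.
 */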

#define NV_DMA_MAX_CHANNELS 32

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
static void tegra_dma_stop(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);

void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
}

static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	unsigned int csr;
	unsigned int status;

	csr = ch->csr;
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}

int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	unsigned int csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	csr = ch->csr;
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;

	/* Set the enable as that is not shadowed */
	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}

int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int csr;
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int to_transfer;
	int req_transfer_count;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 *  - Change the source selector to invalid to stop the DMA from
	 *    FIFO to memory.
	 *  - Read the status register to know the number of pending
	 *    bytes to be transferred.
	 *  - Finally stop or program the DMA to the next buffer in the
	 *    list.
	 */
	csr = ch->csr;
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;

	/* Set the enable as that is not shadowed */
	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Get the transfer count */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
	req_transfer_count += 1;
	to_transfer += 1;

	req->bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		req->bytes_transferred -= to_transfer;

	/* In continuous transfer mode, DMA only tracks the count of the
	 * half DMA buffer. So, if the DMA already finished half the DMA
	 * then add the half buffer to the completed count.
	 *
	 * FIXME: There can be a race here. What if the req to
	 * dequeue happens at the same time as the DMA just moved to
	 * the new buffer and SW hasn't yet received the interrupt?
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			req->bytes_transferred += req_transfer_count;

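	/* Word counts from CSR/STA are in 32-bit units; convert to bytes. */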
	req->bytes_transferred *= 4;

	tegra_dma_stop(ch);
	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	is_empty = list_empty(&ch->list);
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);

int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRANSFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);

struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch;

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			return NULL;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	__clear_bit(ch->id, channel_usage);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
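
/*
 * Rough sketch of the client API (error handling omitted; "uart_fifo_phys",
 * "buf_phys", "my_req_sel" and "my_complete" are hypothetical names, and
 * the request-line value for req_sel comes from the peripheral's
 * documentation):
 *
 *	static void my_complete(struct tegra_dma_req *req)
 *	{
 *		pr_debug("done: %d bytes\n", req->bytes_transferred);
 *	}
 *
 *	struct tegra_dma_channel *ch;
 *	static struct tegra_dma_req req;
 *
 *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
 *	if (!ch)
 *		return -EBUSY;
 *	req.to_memory = 1;			// APB FIFO -> memory
 *	req.source_addr = uart_fifo_phys;	// APB side
 *	req.dest_addr = buf_phys;		// AHB side, word aligned
 *	req.size = 4096;			// bytes
 *	req.req_sel = my_req_sel;
 *	req.source_bus_width = 32;
 *	req.dest_bus_width = 32;
 *	req.source_wrap = 4;			// wrap on the 4-byte FIFO
 *	req.dest_wrap = 0;			// no wrap: linear buffer
 *	req.complete = my_complete;
 *	tegra_dma_enqueue_req(ch, &req);
 *	...
 *	tegra_dma_free_channel(ch);
 */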

static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	if (req->to_memory) {
		ch->apb_ptr = req->source_addr;
		ch->ahb_ptr = req->dest_addr;
	} else {
		ch->apb_ptr = req->dest_addr;
		ch->ahb_ptr = req->source_addr;
	}
	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;
	unsigned long csr;

	ch->csr |= CSR_FLOW;
	ch->csr &= ~CSR_REQ_SEL_MASK;
	ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
	ch->ahb_seq &= ~AHB_SEQ_BURST_MASK;
	ch->ahb_seq |= AHB_SEQ_BURST_1;

	/* One shot mode is always single buffered,
	 * continuous mode is always double buffered
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		ch->csr |= CSR_ONCE;
		ch->ahb_seq &= ~AHB_SEQ_DBL_BUF;
		ch->csr &= ~CSR_WCOUNT_MASK;
		ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT;
	} else {
		ch->csr &= ~CSR_ONCE;
		ch->ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full */
		ch->csr &= ~CSR_WCOUNT_MASK;
		ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT;
	}

	if (req->to_memory) {
		ch->csr &= ~CSR_DIR;
		ch->apb_ptr = req->source_addr;
		ch->ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;

	} else {
		ch->csr |= CSR_DIR;
		ch->apb_ptr = req->dest_addr;
		ch->ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	ch->apb_seq &= ~APB_SEQ_WRAP_MASK;
	ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK;
	ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK;
	ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK;
	ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	ch->csr |= CSR_IE_EOC;

	/* update hw registers with the shadow */
	writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

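	/* Start the channel; CSR_ENB is written last and is never shadowed */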
	csr = ch->csr | CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}

static void tegra_dma_init_hw(struct tegra_dma_channel *ch)
{
	/* One shot with an interrupt to CPU after transfer */
	ch->csr = CSR_ONCE | CSR_IE_EOC;
	ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB;
	ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | (1 << APB_SEQ_WRAP_SHIFT);
}

static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	spin_lock(&ch->lock);
	if (list_empty(&ch->list)) {
		spin_unlock(&ch->lock);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred =
			(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock(&ch->lock);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock(&ch->lock);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		 * another req, in which case the DMA has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock(&ch->lock);
}

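/*
 * Continuous transfers run double buffered: the hardware interrupts at
 * each half-buffer boundary, so a request advances EMPTY -> HALF_FULL
 * (threshold callback fires and the next request's pointers are
 * pre-programmed) -> FULL (completion callback fires and the request is
 * removed from the list).
 */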
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	spin_lock(&ch->lock);
	if (list_empty(&ch->list)) {
		spin_unlock(&ch->lock);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			/* Load the next request into the hardware, if
			 * available */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock(&ch->lock);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e.
			 * on the second interrupt) */
			int bytes_transferred;

			bytes_transferred =
				(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock(&ch->lock);
			req->complete(req);
			return;

		} else {
			BUG();
		}
	}
	spin_unlock(&ch->lock);
}

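/*
 * The hard IRQ handler just acknowledges the EOC status bit; all list
 * manipulation and client callbacks are deferred to the threaded handler.
 */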
static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC) {
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	} else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}

int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	void __iomem *addr;

	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
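	/* unmask interrupts for channels 0..TEGRA_SYSTEM_DMA_CH_MAX only */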
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
		addr + APB_DMA_IRQ_MASK_SET);

	memset(channel_usage, 0, sizeof(channel_usage));
	memset(dma_channels, 0, sizeof(dma_channels));

	/* Reserve all the channels we are not supposed to touch */
	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
		__set_bit(i, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		__clear_bit(i, channel_usage);

		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);
		tegra_dma_init_hw(ch);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
			dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
		__set_bit(i, channel_usage);

	return ret;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}

#ifdef CONFIG_PM
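/*
 * Suspend context: the three global registers (GEN, CNTRL, IRQ_MASK)
 * followed by five per-channel registers (CSR, AHB_PTR, AHB_SEQ, APB_PTR,
 * APB_SEQ) for each of the TEGRA_SYSTEM_DMA_CH_NR channels, saved and
 * restored in that order below.
 */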
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];

void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}

void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}

#endif