/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <mach/clk.h>
#include "dmaengine.h"
#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020
/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC
/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC
/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010
/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0
/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)
/*
 * If any burst is in flight and the DMA is paused, this is the time
 * (in microseconds) needed to complete the in-flight burst and update
 * the DMA status register.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20
/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

/* DMA channel register space size */
#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE	0x20
/*
 * tegra_dma_chip_data: Tegra chip specific DMA data.
 * @nr_channels: Number of channels available in the controller.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int max_dma_count;
};
/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long	csr;
	unsigned long	ahb_ptr;
	unsigned long	apb_ptr;
	unsigned long	ahb_seq;
	unsigned long	apb_seq;
};
/*
 * tegra_dma_sg_req: DMA request details needed to configure the hardware.
 * This contains the details of one transfer to program into the DMA hw.
 * The client's request for data transfer can be broken into multiple
 * sub-transfers as per requester details and hw support.
 * Each sub-transfer is added to the list of transfers and points to the
 * Tegra DMA descriptor that manages the transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	int				req_len;
	bool				configured;
	bool				last_sg;
	bool				half_done;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
};
/*
 * tegra_dma_desc: Tegra DMA descriptor that manages the client requests.
 * This descriptor keeps track of transfer status, callbacks, request
 * counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	int				bytes_requested;
	int				bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	int				cb_count;
};
struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);
/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	char			name[30];
	bool			config_init;
	int			id;
	int			irq;
	unsigned long		chan_base_offset;
	spinlock_t		lock;
	bool			busy;
	struct tegra_dma	*tdma;
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;
	dma_async_tx_callback	callback;
	void			*callback_param;

	/* Channel-slave specific configuration */
	struct dma_slave_config dma_sconfig;
};
/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/* Some registers need to be cached before suspend */
	u32				reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};
static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}
static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}
static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}
static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);
/* Get a DMA desc from the free list; if none is available, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not allocate if descs are waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
		return NULL;
	}

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}
static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
	if (!sg_req)
		dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
	return sg_req;
}
static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;
	return 0;
}
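
/*
 * Pause/resume note: Tegra20 has no per-channel pause, so clearing the
 * GENERAL enable bit below stalls every channel on the controller. The
 * pair of helpers takes tdma->global_lock in pause and releases it in
 * resume, so every pause must be bracketed by a matching resume with
 * only a short critical section in between.
 */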
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
	if (wait_for_burst_complete)
		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	spin_unlock(&tdma->global_lock);
}
static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}
static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}
static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no IEC status then this makes sure that the last
	 * burst has not completed. There may be a case where the last
	 * burst is in flight and completes anyway, but because the DMA is
	 * paused it neither generates an interrupt nor reloads the new
	 * configuration.
	 * If there is already IEC status then the interrupt handler needs
	 * to load the new configuration.
	 */
	tegra_dma_global_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending then do nothing as the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_global_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_global_resume(tdc);
}
static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}
static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}
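
/*
 * The channel STATUS count field uses the same "bytes minus 4" encoding
 * as the CSR word count and counts down as words complete. For example
 * (under that assumption), a 64-byte request with 16 bytes still
 * outstanding reports a count of 12, so the helper below returns
 * 64 - 12 - 4 = 48 bytes transferred.
 */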
static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
			typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add in cb list if it is not there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
							&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}
static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "DMA is running without any request\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head req on the list is in flight.
	 * If it is not in flight then abort the transfer as
	 * the transfer loop cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}
static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_SUCCESS;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}
static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not last req then put at end of pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}
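
/*
 * Bottom half: the ISR queues completed descriptors on tdc->cb_desc and
 * schedules this tasklet. The tasklet snapshots cb_count and drops the
 * channel lock around each client callback, so a callback can safely
 * submit new descriptors on the same channel.
 */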
static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		callback = dma_desc->txd.callback;
		callback_param = dma_desc->txd.callback_param;
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count-- && callback)
			callback(callback_param);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}
static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}
static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time so the DMA can be
			 * configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
	return;
}
static void tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_global_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, status);
	}
	tegra_dma_global_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
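
/*
 * Residue bookkeeping for the status query below: bytes_transferred only
 * grows, and for cyclic transfers it grows past bytes_requested, so the
 * residue is computed modulo the request size. For example, with a
 * 4096-byte cyclic buffer and 6144 bytes transferred so far,
 * 6144 % 4096 = 2048 and the reported residue is 4096 - 2048 = 2048 bytes.
 */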
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	spin_lock_irqsave(&tdc->lock, flags);

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		dma_set_residue(txstate, 0);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return ret;
	}

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}
static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return tegra_dma_slave_config(dc,
				(struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		tegra_dma_terminate_all(dc);
		return 0;

	default:
		break;
	}

	return -ENXIO;
}
static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}
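
/*
 * Burst selection examples for the helper below: a client maxburst of 8
 * words on a 4-byte bus gives burst_byte = 32 and burst_ahb_width = 8,
 * i.e. AHB BURST_8. With maxburst of 0, the length decides: a 48-byte
 * (0x30) transfer picks BURST_4, while a 32-byte (0x20) transfer picks
 * BURST_8.
 */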
static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * The client's burst_size is in units of the bus width;
	 * convert it into the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}
static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}
	return -EINVAL;
}
static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;
	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
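		/*
		 * The hardware word count in CSR[15:2] is encoded as
		 * (bytes - 4): a 4096-byte segment is programmed as 4092
		 * (0x0FFC). len is already validated as a multiple of 4,
		 * and the 0xFFFC mask matches TEGRA_APBDMA_CSR_WCOUNT_MASK.
		 */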
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;
	/*
	 * Make sure that the requested mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
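
/*
 * Typical dmaengine client usage of the slave-sg path above (an
 * illustrative sketch; names like done_cb, fifo_phys_addr and
 * requester_id are placeholders, not part of this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 8,
 *		.slave_id	= requester_id,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	desc->callback = done_cb;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */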
static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;
	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * More requests can be queued until the DMA is started; the
	 * driver will loop over all of them. Once the DMA is started,
	 * new requests can be queued only after terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when DMA is running\n");
		return NULL;
	}

	/*
	 * Cyclic transfer is supported only when buf_len is a multiple
	 * of period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;
	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->half_done = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	dma_desc->txd.flags = 0;

	/*
	 * Make sure that the requested mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
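
/*
 * Cyclic operation: each period becomes one sg_req on pending_sg_req. On
 * every period-complete interrupt, handle_cont_sngl_cycle_dma_done() moves
 * the finished request to the tail of the list and the next head is
 * reprogrammed under a global pause, so the transfer keeps looping over
 * the buffer until the client terminates it.
 */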
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0)
		dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
	return ret;
}
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;

	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	if (tdc->busy)
		tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	clk_disable_unprepare(tdma->dma_clk);
}
/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.max_dma_count		= 1024UL * 64,
};

#if defined(CONFIG_OF)
/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.max_dma_count		= 1024UL * 64,
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
#endif
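
/*
 * Illustrative device tree node for this controller (the register address
 * is the usual Tegra20 one, shown here only as an example; the
 * authoritative binding lives in the nvidia,tegra20-apbdma binding
 * document):
 *
 *	apbdma: dma@6000a000 {
 *		compatible = "nvidia,tegra20-apbdma";
 *		reg = <0x6000a000 0x1200>;
 *		interrupts = <...>;	(one entry per channel)
 *	};
 */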
static int __devinit tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata = NULL;

	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_device(of_match_ptr(tegra_dma_of_match),
					&pdev->dev);
		if (!match) {
			dev_err(&pdev->dev, "Error: No device match found\n");
			return -ENODEV;
		}
		cdata = match->data;
	} else {
		/* If no device tree then fallback to tegra20 */
		cdata = &tegra20_dma_chip_data;
	}
	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma) {
		dev_err(&pdev->dev, "Error: memory allocation failed\n");
		return -ENOMEM;
	}

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "No mem resource for DMA\n");
		return -EINVAL;
	}

	tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res);
	if (!tdma->base_addr) {
		dev_err(&pdev->dev,
			"Cannot request memregion/iomap dma address\n");
		return -EADDRNOTAVAIL;
	}

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_dma_runtime_resume(&pdev->dev);
		if (ret) {
			dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
				ret);
			goto err_pm_disable;
		}
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		goto err_pm_disable;
	}

	/* Reset DMA controller */
	tegra_periph_reset_assert(tdma->dma_clk);
	udelay(2);
	tegra_periph_reset_deassert(tdma->dma_clk);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	clk_disable_unprepare(tdma->dma_clk);
	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
					i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, tdc->irq,
				tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}
	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_control = tegra_dma_device_control;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}
static int __devexit tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}
static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	clk_disable_unprepare(tdma->dma_clk);
	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = tegra_dma_runtime_suspend,
	.runtime_resume = tegra_dma_runtime_resume,
#endif
};

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.owner	= THIS_MODULE,
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = of_match_ptr(tegra_dma_of_match),
	},
	.probe		= tegra_dma_probe,
	.remove		= __devexit_p(tegra_dma_remove),
};

module_platform_driver(tegra_dmac_driver);
MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");