/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/clk/tegra.h>

#include "dmaengine.h"
#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

/* CSRE register */
#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)
/*
 * If any burst is in flight and DMA is paused, then this is the time it takes
 * to complete the in-flight burst and update the DMA status register.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20
/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

/* DMA channel register space size */
#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE	0x20
/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @support_channel_pause: Support channel-wise pause of DMA.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int max_dma_count;
	bool support_channel_pause;
};
/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long	csr;
	unsigned long	ahb_ptr;
	unsigned long	apb_ptr;
	unsigned long	ahb_seq;
	unsigned long	apb_seq;
};
/*
 * tegra_dma_sg_req: DMA request details to configure the hardware. This
 * contains the details for one transfer to configure the DMA hw.
 * The client's request for data transfer can be broken into multiple
 * sub-transfers as per the requester details and hw support.
 * Each sub-transfer gets added to the list of transfers and points to the
 * Tegra DMA descriptor which manages the transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	int				req_len;
	bool				configured;
	bool				last_sg;
	bool				half_done;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
};
/*
 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests.
 * This descriptor keeps track of transfer status, callbacks and request
 * counts etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	int				bytes_requested;
	int				bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	int				cb_count;
};
struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);
/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	char			name[30];
	bool			config_init;
	int			id;
	int			irq;
	unsigned long		chan_base_offset;
	spinlock_t		lock;
	bool			busy;
	struct tegra_dma	*tdma;
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;
	dma_async_tx_callback	callback;
	void			*callback_param;

	/* Channel-slave specific configuration */
	struct dma_slave_config dma_sconfig;
};
/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/* Some registers need to be cached before suspend */
	u32				reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};
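/*
 * Note on the zero-length channels[] array above: the per-channel array is
 * allocated together with struct tegra_dma in tegra_dma_probe(), which sizes
 * the allocation as sizeof(*tdma) + nr_channels *
 * sizeof(struct tegra_dma_channel), so channels[] must remain the last
 * member of the structure.
 */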
static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);
/* Get DMA desc from free list, if not there then allocate it. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not allocate if desc are waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
		return NULL;
	}

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}
static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
	if (!sg_req)
		dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
	return sg_req;
}
static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;
	return 0;
}
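/*
 * Illustrative client-side usage of the above (not part of this driver; the
 * FIFO address below is a placeholder): a slave client fills in a
 * struct dma_slave_config and hands it to the channel through the generic
 * dmaengine helper, which reaches tegra_dma_slave_config() via
 * device_control(DMA_SLAVE_CONFIG):
 *
 *	struct dma_slave_config sconfig = {
 *		.src_addr	= FIFO_PHYS_ADDR,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 8,
 *		.direction	= DMA_DEV_TO_MEM,
 *	};
 *	ret = dmaengine_slave_config(chan, &sconfig);
 */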
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
	bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
	if (wait_for_burst_complete)
		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_pause(struct tegra_dma_channel *tdc,
	bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
				TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	} else {
		tegra_dma_global_resume(tdc);
	}
}
static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}
static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}
static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no IEC status then this makes sure that the last burst
	 * has not been completed. There may be a case where the last burst is
	 * in flight and so it can complete, but because DMA is paused, it
	 * will neither generate an interrupt nor reload the new
	 * configuration.
	 * If there is already IEC status then the interrupt handler needs to
	 * load the new configuration.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending then do nothing as the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_resume(tdc);
}
static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}
static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}
static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
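/*
 * Note on the arithmetic above (an inference from how the word count is
 * programmed, not a documented guarantee): the STATUS COUNT field appears to
 * use the same "(length - 4) bytes, word aligned" encoding that the prep
 * functions below write into CSR via "(len - 4) & 0xFFFC", hence the extra
 * 4-byte adjustment when converting the remaining count into the number of
 * bytes already transferred.
 */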
static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
						typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add in cb list if it is not there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
							&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}
static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Dma is running without req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head req on the list is in flight.
	 * If it is not in flight then abort the transfer as
	 * looping of the transfer cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}
static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_SUCCESS;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}
static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not last req then put at end of pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}
static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		callback = dma_desc->txd.callback;
		callback_param = dma_desc->txd.callback_param;
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count-- && callback)
			callback(callback_param);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
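/*
 * Locking note for the tasklet above: tdc->lock is deliberately dropped
 * around the callback invocations so that a client callback may submit or
 * terminate transfers on this channel without deadlocking; cb_count is
 * sampled and cleared under the lock before the callbacks run.
 */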
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}
static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}
static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time so the DMA can be
			 * configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static void tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, status);
	}
	tegra_dma_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	spin_lock_irqsave(&tdc->lock, flags);

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return ret;
	}

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}
static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return tegra_dma_slave_config(dc,
				(struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		tegra_dma_terminate_all(dc);
		return 0;

	default:
		break;
	}

	return -ENXIO;
}
static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}
static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in terms of the bus width;
	 * convert it into the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}
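/*
 * Worked example for the burst selection above (illustrative): a client
 * requesting maxburst = 8 words on a 4-byte wide slave bus gives
 * burst_byte = 8 * 4 = 32 bytes, i.e. burst_ahb_width = 32 / 4 = 8 AHB
 * words, which selects TEGRA_APBDMA_AHBSEQ_BURST_8.
 */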
static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
		return -EINVAL;
	}
}
static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"Dma length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the requested mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
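/*
 * Illustrative end-to-end usage of the sg path above (not part of this
 * driver; chan, sgl, nents, ctx and xfer_done are hypothetical client-side
 * names):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	desc->callback = xfer_done;
 *	desc->callback_param = ctx;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */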
static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * We can take more requests as long as DMA has not been started.
	 * The driver will loop over all requests.
	 * Once DMA has started, new requests can be queued only after
	 * terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->half_done = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the requested mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0)
		dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
	return ret;
}
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;

	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	if (tdc->busy)
		tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	clk_disable_unprepare(tdma->dma_clk);
}
/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels		= 32,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
};
static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
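/*
 * Illustrative device tree binding for the match table above (the address,
 * size and interrupt specifiers are placeholders, not taken from a real
 * board file):
 *
 *	apbdma: dma@6000a000 {
 *		compatible = "nvidia,tegra20-apbdma";
 *		reg = <0x6000a000 0x1200>;
 *		interrupts = <...>;
 *	};
 */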
static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource	*res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata = NULL;
	const struct of_device_id *match;

	match = of_match_device(tegra_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}
	cdata = match->data;

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma) {
		dev_err(&pdev->dev, "Error: memory allocation failed\n");
		return -ENOMEM;
	}

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "No mem resource for DMA\n");
		return -EINVAL;
	}

	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_dma_runtime_resume(&pdev->dev);
		if (ret) {
			dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
				ret);
			goto err_pm_disable;
		}
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		goto err_pm_disable;
	}

	/* Reset DMA controller */
	tegra_periph_reset_assert(tdma->dma_clk);
	udelay(2);
	tegra_periph_reset_deassert(tdma->dma_clk);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	clk_disable_unprepare(tdma->dma_clk);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
					i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, tdc->irq,
				tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_control = tegra_dma_device_control;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}
static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}
static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	clk_disable_unprepare(tdma->dma_clk);
	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = tegra_dma_runtime_suspend,
	.runtime_resume = tegra_dma_runtime_resume,
#endif
};

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.owner	= THIS_MODULE,
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");