/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
15 #include <linux/kernel.h>
16 #include <linux/spinlock.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/err.h>
21 #include <linux/dma-mapping.h>
23 #include <linux/delay.h>
25 #include "davinci_cpdma.h"
/* CPDMA register offsets (relative to params.dmaregs) */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

/* value written to/read from a channel's CP register on teardown */
#define CPDMA_TEARDOWN_VALUE	0xfffffffc
83 struct cpdma_desc_pool
{
86 void __iomem
*iomap
; /* ioremap map */
87 void *cpumap
; /* dma_alloc map */
88 int desc_size
, mem_size
;
89 int num_desc
, used_desc
;
90 unsigned long *bitmap
;
/* human-readable names for enum cpdma_state, indexed by state value */
static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
104 enum cpdma_state state
;
105 struct cpdma_params params
;
107 struct cpdma_desc_pool
*pool
;
109 struct cpdma_chan
*channels
[2 * CPDMA_MAX_CHANNELS
];
113 struct cpdma_desc __iomem
*head
, *tail
;
114 void __iomem
*hdp
, *cp
, *rxfree
;
115 enum cpdma_state state
;
116 struct cpdma_ctlr
*ctlr
;
121 cpdma_handler_fn handler
;
122 enum dma_data_direction dir
;
123 struct cpdma_chan_stats stats
;
124 /* offsets into dmaregs */
125 int int_set
, int_clear
, td
;
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

/*
 * On tx channels, tag the descriptor with a directed-port override when
 * the caller asked for port 1 or 2. No-op for rx channels.
 */
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)
149 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
150 * emac) have dedicated on-chip memory for these descriptors. Some other
151 * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
152 * abstract out these details
154 static struct cpdma_desc_pool
*
155 cpdma_desc_pool_create(struct device
*dev
, u32 phys
, u32 hw_addr
,
159 struct cpdma_desc_pool
*pool
;
161 pool
= devm_kzalloc(dev
, sizeof(*pool
), GFP_KERNEL
);
165 spin_lock_init(&pool
->lock
);
168 pool
->mem_size
= size
;
169 pool
->desc_size
= ALIGN(sizeof(struct cpdma_desc
), align
);
170 pool
->num_desc
= size
/ pool
->desc_size
;
172 bitmap_size
= (pool
->num_desc
/ BITS_PER_LONG
) * sizeof(long);
173 pool
->bitmap
= devm_kzalloc(dev
, bitmap_size
, GFP_KERNEL
);
179 pool
->iomap
= ioremap(phys
, size
);
180 pool
->hw_addr
= hw_addr
;
182 pool
->cpumap
= dma_alloc_coherent(dev
, size
, &pool
->phys
,
184 pool
->iomap
= pool
->cpumap
;
185 pool
->hw_addr
= pool
->phys
;
194 static void cpdma_desc_pool_destroy(struct cpdma_desc_pool
*pool
)
201 spin_lock_irqsave(&pool
->lock
, flags
);
202 WARN_ON(pool
->used_desc
);
204 dma_free_coherent(pool
->dev
, pool
->mem_size
, pool
->cpumap
,
207 iounmap(pool
->iomap
);
209 spin_unlock_irqrestore(&pool
->lock
, flags
);
212 static inline dma_addr_t
desc_phys(struct cpdma_desc_pool
*pool
,
213 struct cpdma_desc __iomem
*desc
)
217 return pool
->hw_addr
+ (__force
long)desc
- (__force
long)pool
->iomap
;
220 static inline struct cpdma_desc __iomem
*
221 desc_from_phys(struct cpdma_desc_pool
*pool
, dma_addr_t dma
)
223 return dma
? pool
->iomap
+ dma
- pool
->hw_addr
: NULL
;
226 static struct cpdma_desc __iomem
*
227 cpdma_desc_alloc(struct cpdma_desc_pool
*pool
, int num_desc
, bool is_rx
)
233 struct cpdma_desc __iomem
*desc
= NULL
;
235 spin_lock_irqsave(&pool
->lock
, flags
);
239 desc_end
= pool
->num_desc
/2;
241 desc_start
= pool
->num_desc
/2;
242 desc_end
= pool
->num_desc
;
245 index
= bitmap_find_next_zero_area(pool
->bitmap
,
246 desc_end
, desc_start
, num_desc
, 0);
247 if (index
< desc_end
) {
248 bitmap_set(pool
->bitmap
, index
, num_desc
);
249 desc
= pool
->iomap
+ pool
->desc_size
* index
;
253 spin_unlock_irqrestore(&pool
->lock
, flags
);
257 static void cpdma_desc_free(struct cpdma_desc_pool
*pool
,
258 struct cpdma_desc __iomem
*desc
, int num_desc
)
260 unsigned long flags
, index
;
262 index
= ((unsigned long)desc
- (unsigned long)pool
->iomap
) /
264 spin_lock_irqsave(&pool
->lock
, flags
);
265 bitmap_clear(pool
->bitmap
, index
, num_desc
);
267 spin_unlock_irqrestore(&pool
->lock
, flags
);
270 struct cpdma_ctlr
*cpdma_ctlr_create(struct cpdma_params
*params
)
272 struct cpdma_ctlr
*ctlr
;
274 ctlr
= devm_kzalloc(params
->dev
, sizeof(*ctlr
), GFP_KERNEL
);
278 ctlr
->state
= CPDMA_STATE_IDLE
;
279 ctlr
->params
= *params
;
280 ctlr
->dev
= params
->dev
;
281 spin_lock_init(&ctlr
->lock
);
283 ctlr
->pool
= cpdma_desc_pool_create(ctlr
->dev
,
284 ctlr
->params
.desc_mem_phys
,
285 ctlr
->params
.desc_hw_addr
,
286 ctlr
->params
.desc_mem_size
,
287 ctlr
->params
.desc_align
);
293 if (WARN_ON(ctlr
->num_chan
> CPDMA_MAX_CHANNELS
))
294 ctlr
->num_chan
= CPDMA_MAX_CHANNELS
;
297 EXPORT_SYMBOL_GPL(cpdma_ctlr_create
);
299 int cpdma_ctlr_start(struct cpdma_ctlr
*ctlr
)
304 spin_lock_irqsave(&ctlr
->lock
, flags
);
305 if (ctlr
->state
!= CPDMA_STATE_IDLE
) {
306 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
310 if (ctlr
->params
.has_soft_reset
) {
311 unsigned timeout
= 10 * 100;
313 dma_reg_write(ctlr
, CPDMA_SOFTRESET
, 1);
315 if (dma_reg_read(ctlr
, CPDMA_SOFTRESET
) == 0)
323 for (i
= 0; i
< ctlr
->num_chan
; i
++) {
324 __raw_writel(0, ctlr
->params
.txhdp
+ 4 * i
);
325 __raw_writel(0, ctlr
->params
.rxhdp
+ 4 * i
);
326 __raw_writel(0, ctlr
->params
.txcp
+ 4 * i
);
327 __raw_writel(0, ctlr
->params
.rxcp
+ 4 * i
);
330 dma_reg_write(ctlr
, CPDMA_RXINTMASKCLEAR
, 0xffffffff);
331 dma_reg_write(ctlr
, CPDMA_TXINTMASKCLEAR
, 0xffffffff);
333 dma_reg_write(ctlr
, CPDMA_TXCONTROL
, 1);
334 dma_reg_write(ctlr
, CPDMA_RXCONTROL
, 1);
336 ctlr
->state
= CPDMA_STATE_ACTIVE
;
338 for (i
= 0; i
< ARRAY_SIZE(ctlr
->channels
); i
++) {
339 if (ctlr
->channels
[i
])
340 cpdma_chan_start(ctlr
->channels
[i
]);
342 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
345 EXPORT_SYMBOL_GPL(cpdma_ctlr_start
);
347 int cpdma_ctlr_stop(struct cpdma_ctlr
*ctlr
)
352 spin_lock_irqsave(&ctlr
->lock
, flags
);
353 if (ctlr
->state
== CPDMA_STATE_TEARDOWN
) {
354 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
358 ctlr
->state
= CPDMA_STATE_TEARDOWN
;
360 for (i
= 0; i
< ARRAY_SIZE(ctlr
->channels
); i
++) {
361 if (ctlr
->channels
[i
])
362 cpdma_chan_stop(ctlr
->channels
[i
]);
365 dma_reg_write(ctlr
, CPDMA_RXINTMASKCLEAR
, 0xffffffff);
366 dma_reg_write(ctlr
, CPDMA_TXINTMASKCLEAR
, 0xffffffff);
368 dma_reg_write(ctlr
, CPDMA_TXCONTROL
, 0);
369 dma_reg_write(ctlr
, CPDMA_RXCONTROL
, 0);
371 ctlr
->state
= CPDMA_STATE_IDLE
;
373 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
376 EXPORT_SYMBOL_GPL(cpdma_ctlr_stop
);
378 int cpdma_ctlr_dump(struct cpdma_ctlr
*ctlr
)
380 struct device
*dev
= ctlr
->dev
;
384 spin_lock_irqsave(&ctlr
->lock
, flags
);
386 dev_info(dev
, "CPDMA: state: %s", cpdma_state_str
[ctlr
->state
]);
388 dev_info(dev
, "CPDMA: txidver: %x",
389 dma_reg_read(ctlr
, CPDMA_TXIDVER
));
390 dev_info(dev
, "CPDMA: txcontrol: %x",
391 dma_reg_read(ctlr
, CPDMA_TXCONTROL
));
392 dev_info(dev
, "CPDMA: txteardown: %x",
393 dma_reg_read(ctlr
, CPDMA_TXTEARDOWN
));
394 dev_info(dev
, "CPDMA: rxidver: %x",
395 dma_reg_read(ctlr
, CPDMA_RXIDVER
));
396 dev_info(dev
, "CPDMA: rxcontrol: %x",
397 dma_reg_read(ctlr
, CPDMA_RXCONTROL
));
398 dev_info(dev
, "CPDMA: softreset: %x",
399 dma_reg_read(ctlr
, CPDMA_SOFTRESET
));
400 dev_info(dev
, "CPDMA: rxteardown: %x",
401 dma_reg_read(ctlr
, CPDMA_RXTEARDOWN
));
402 dev_info(dev
, "CPDMA: txintstatraw: %x",
403 dma_reg_read(ctlr
, CPDMA_TXINTSTATRAW
));
404 dev_info(dev
, "CPDMA: txintstatmasked: %x",
405 dma_reg_read(ctlr
, CPDMA_TXINTSTATMASKED
));
406 dev_info(dev
, "CPDMA: txintmaskset: %x",
407 dma_reg_read(ctlr
, CPDMA_TXINTMASKSET
));
408 dev_info(dev
, "CPDMA: txintmaskclear: %x",
409 dma_reg_read(ctlr
, CPDMA_TXINTMASKCLEAR
));
410 dev_info(dev
, "CPDMA: macinvector: %x",
411 dma_reg_read(ctlr
, CPDMA_MACINVECTOR
));
412 dev_info(dev
, "CPDMA: maceoivector: %x",
413 dma_reg_read(ctlr
, CPDMA_MACEOIVECTOR
));
414 dev_info(dev
, "CPDMA: rxintstatraw: %x",
415 dma_reg_read(ctlr
, CPDMA_RXINTSTATRAW
));
416 dev_info(dev
, "CPDMA: rxintstatmasked: %x",
417 dma_reg_read(ctlr
, CPDMA_RXINTSTATMASKED
));
418 dev_info(dev
, "CPDMA: rxintmaskset: %x",
419 dma_reg_read(ctlr
, CPDMA_RXINTMASKSET
));
420 dev_info(dev
, "CPDMA: rxintmaskclear: %x",
421 dma_reg_read(ctlr
, CPDMA_RXINTMASKCLEAR
));
422 dev_info(dev
, "CPDMA: dmaintstatraw: %x",
423 dma_reg_read(ctlr
, CPDMA_DMAINTSTATRAW
));
424 dev_info(dev
, "CPDMA: dmaintstatmasked: %x",
425 dma_reg_read(ctlr
, CPDMA_DMAINTSTATMASKED
));
426 dev_info(dev
, "CPDMA: dmaintmaskset: %x",
427 dma_reg_read(ctlr
, CPDMA_DMAINTMASKSET
));
428 dev_info(dev
, "CPDMA: dmaintmaskclear: %x",
429 dma_reg_read(ctlr
, CPDMA_DMAINTMASKCLEAR
));
431 if (!ctlr
->params
.has_ext_regs
) {
432 dev_info(dev
, "CPDMA: dmacontrol: %x",
433 dma_reg_read(ctlr
, CPDMA_DMACONTROL
));
434 dev_info(dev
, "CPDMA: dmastatus: %x",
435 dma_reg_read(ctlr
, CPDMA_DMASTATUS
));
436 dev_info(dev
, "CPDMA: rxbuffofs: %x",
437 dma_reg_read(ctlr
, CPDMA_RXBUFFOFS
));
440 for (i
= 0; i
< ARRAY_SIZE(ctlr
->channels
); i
++)
441 if (ctlr
->channels
[i
])
442 cpdma_chan_dump(ctlr
->channels
[i
]);
444 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
447 EXPORT_SYMBOL_GPL(cpdma_ctlr_dump
);
449 int cpdma_ctlr_destroy(struct cpdma_ctlr
*ctlr
)
457 spin_lock_irqsave(&ctlr
->lock
, flags
);
458 if (ctlr
->state
!= CPDMA_STATE_IDLE
)
459 cpdma_ctlr_stop(ctlr
);
461 for (i
= 0; i
< ARRAY_SIZE(ctlr
->channels
); i
++)
462 cpdma_chan_destroy(ctlr
->channels
[i
]);
464 cpdma_desc_pool_destroy(ctlr
->pool
);
465 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
468 EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy
);
470 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr
*ctlr
, bool enable
)
475 spin_lock_irqsave(&ctlr
->lock
, flags
);
476 if (ctlr
->state
!= CPDMA_STATE_ACTIVE
) {
477 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
481 reg
= enable
? CPDMA_DMAINTMASKSET
: CPDMA_DMAINTMASKCLEAR
;
482 dma_reg_write(ctlr
, reg
, CPDMA_DMAINT_HOSTERR
);
484 for (i
= 0; i
< ARRAY_SIZE(ctlr
->channels
); i
++) {
485 if (ctlr
->channels
[i
])
486 cpdma_chan_int_ctrl(ctlr
->channels
[i
], enable
);
489 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
492 EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl
);
494 void cpdma_ctlr_eoi(struct cpdma_ctlr
*ctlr
, u32 value
)
496 dma_reg_write(ctlr
, CPDMA_MACEOIVECTOR
, value
);
498 EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi
);
500 struct cpdma_chan
*cpdma_chan_create(struct cpdma_ctlr
*ctlr
, int chan_num
,
501 cpdma_handler_fn handler
)
503 struct cpdma_chan
*chan
;
504 int offset
= (chan_num
% CPDMA_MAX_CHANNELS
) * 4;
507 if (__chan_linear(chan_num
) >= ctlr
->num_chan
)
510 chan
= devm_kzalloc(ctlr
->dev
, sizeof(*chan
), GFP_KERNEL
);
512 return ERR_PTR(-ENOMEM
);
514 spin_lock_irqsave(&ctlr
->lock
, flags
);
515 if (ctlr
->channels
[chan_num
]) {
516 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
517 devm_kfree(ctlr
->dev
, chan
);
518 return ERR_PTR(-EBUSY
);
522 chan
->state
= CPDMA_STATE_IDLE
;
523 chan
->chan_num
= chan_num
;
524 chan
->handler
= handler
;
526 if (is_rx_chan(chan
)) {
527 chan
->hdp
= ctlr
->params
.rxhdp
+ offset
;
528 chan
->cp
= ctlr
->params
.rxcp
+ offset
;
529 chan
->rxfree
= ctlr
->params
.rxfree
+ offset
;
530 chan
->int_set
= CPDMA_RXINTMASKSET
;
531 chan
->int_clear
= CPDMA_RXINTMASKCLEAR
;
532 chan
->td
= CPDMA_RXTEARDOWN
;
533 chan
->dir
= DMA_FROM_DEVICE
;
535 chan
->hdp
= ctlr
->params
.txhdp
+ offset
;
536 chan
->cp
= ctlr
->params
.txcp
+ offset
;
537 chan
->int_set
= CPDMA_TXINTMASKSET
;
538 chan
->int_clear
= CPDMA_TXINTMASKCLEAR
;
539 chan
->td
= CPDMA_TXTEARDOWN
;
540 chan
->dir
= DMA_TO_DEVICE
;
542 chan
->mask
= BIT(chan_linear(chan
));
544 spin_lock_init(&chan
->lock
);
546 ctlr
->channels
[chan_num
] = chan
;
547 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
550 EXPORT_SYMBOL_GPL(cpdma_chan_create
);
552 int cpdma_chan_destroy(struct cpdma_chan
*chan
)
554 struct cpdma_ctlr
*ctlr
;
561 spin_lock_irqsave(&ctlr
->lock
, flags
);
562 if (chan
->state
!= CPDMA_STATE_IDLE
)
563 cpdma_chan_stop(chan
);
564 ctlr
->channels
[chan
->chan_num
] = NULL
;
565 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
569 EXPORT_SYMBOL_GPL(cpdma_chan_destroy
);
571 int cpdma_chan_get_stats(struct cpdma_chan
*chan
,
572 struct cpdma_chan_stats
*stats
)
577 spin_lock_irqsave(&chan
->lock
, flags
);
578 memcpy(stats
, &chan
->stats
, sizeof(*stats
));
579 spin_unlock_irqrestore(&chan
->lock
, flags
);
582 EXPORT_SYMBOL_GPL(cpdma_chan_get_stats
);
584 int cpdma_chan_dump(struct cpdma_chan
*chan
)
587 struct device
*dev
= chan
->ctlr
->dev
;
589 spin_lock_irqsave(&chan
->lock
, flags
);
591 dev_info(dev
, "channel %d (%s %d) state %s",
592 chan
->chan_num
, is_rx_chan(chan
) ? "rx" : "tx",
593 chan_linear(chan
), cpdma_state_str
[chan
->state
]);
594 dev_info(dev
, "\thdp: %x\n", chan_read(chan
, hdp
));
595 dev_info(dev
, "\tcp: %x\n", chan_read(chan
, cp
));
597 dev_info(dev
, "\trxfree: %x\n",
598 chan_read(chan
, rxfree
));
601 dev_info(dev
, "\tstats head_enqueue: %d\n",
602 chan
->stats
.head_enqueue
);
603 dev_info(dev
, "\tstats tail_enqueue: %d\n",
604 chan
->stats
.tail_enqueue
);
605 dev_info(dev
, "\tstats pad_enqueue: %d\n",
606 chan
->stats
.pad_enqueue
);
607 dev_info(dev
, "\tstats misqueued: %d\n",
608 chan
->stats
.misqueued
);
609 dev_info(dev
, "\tstats desc_alloc_fail: %d\n",
610 chan
->stats
.desc_alloc_fail
);
611 dev_info(dev
, "\tstats pad_alloc_fail: %d\n",
612 chan
->stats
.pad_alloc_fail
);
613 dev_info(dev
, "\tstats runt_receive_buff: %d\n",
614 chan
->stats
.runt_receive_buff
);
615 dev_info(dev
, "\tstats runt_transmit_buff: %d\n",
616 chan
->stats
.runt_transmit_buff
);
617 dev_info(dev
, "\tstats empty_dequeue: %d\n",
618 chan
->stats
.empty_dequeue
);
619 dev_info(dev
, "\tstats busy_dequeue: %d\n",
620 chan
->stats
.busy_dequeue
);
621 dev_info(dev
, "\tstats good_dequeue: %d\n",
622 chan
->stats
.good_dequeue
);
623 dev_info(dev
, "\tstats requeue: %d\n",
624 chan
->stats
.requeue
);
625 dev_info(dev
, "\tstats teardown_dequeue: %d\n",
626 chan
->stats
.teardown_dequeue
);
628 spin_unlock_irqrestore(&chan
->lock
, flags
);
632 static void __cpdma_chan_submit(struct cpdma_chan
*chan
,
633 struct cpdma_desc __iomem
*desc
)
635 struct cpdma_ctlr
*ctlr
= chan
->ctlr
;
636 struct cpdma_desc __iomem
*prev
= chan
->tail
;
637 struct cpdma_desc_pool
*pool
= ctlr
->pool
;
641 desc_dma
= desc_phys(pool
, desc
);
643 /* simple case - idle channel */
645 chan
->stats
.head_enqueue
++;
648 if (chan
->state
== CPDMA_STATE_ACTIVE
)
649 chan_write(chan
, hdp
, desc_dma
);
653 /* first chain the descriptor at the tail of the list */
654 desc_write(prev
, hw_next
, desc_dma
);
656 chan
->stats
.tail_enqueue
++;
658 /* next check if EOQ has been triggered already */
659 mode
= desc_read(prev
, hw_mode
);
660 if (((mode
& (CPDMA_DESC_EOQ
| CPDMA_DESC_OWNER
)) == CPDMA_DESC_EOQ
) &&
661 (chan
->state
== CPDMA_STATE_ACTIVE
)) {
662 desc_write(prev
, hw_mode
, mode
& ~CPDMA_DESC_EOQ
);
663 chan_write(chan
, hdp
, desc_dma
);
664 chan
->stats
.misqueued
++;
668 int cpdma_chan_submit(struct cpdma_chan
*chan
, void *token
, void *data
,
669 int len
, int directed
)
671 struct cpdma_ctlr
*ctlr
= chan
->ctlr
;
672 struct cpdma_desc __iomem
*desc
;
678 spin_lock_irqsave(&chan
->lock
, flags
);
680 if (chan
->state
== CPDMA_STATE_TEARDOWN
) {
685 desc
= cpdma_desc_alloc(ctlr
->pool
, 1, is_rx_chan(chan
));
687 chan
->stats
.desc_alloc_fail
++;
692 if (len
< ctlr
->params
.min_packet_size
) {
693 len
= ctlr
->params
.min_packet_size
;
694 chan
->stats
.runt_transmit_buff
++;
697 buffer
= dma_map_single(ctlr
->dev
, data
, len
, chan
->dir
);
698 ret
= dma_mapping_error(ctlr
->dev
, buffer
);
700 cpdma_desc_free(ctlr
->pool
, desc
, 1);
705 mode
= CPDMA_DESC_OWNER
| CPDMA_DESC_SOP
| CPDMA_DESC_EOP
;
706 cpdma_desc_to_port(chan
, mode
, directed
);
708 desc_write(desc
, hw_next
, 0);
709 desc_write(desc
, hw_buffer
, buffer
);
710 desc_write(desc
, hw_len
, len
);
711 desc_write(desc
, hw_mode
, mode
| len
);
712 desc_write(desc
, sw_token
, token
);
713 desc_write(desc
, sw_buffer
, buffer
);
714 desc_write(desc
, sw_len
, len
);
716 __cpdma_chan_submit(chan
, desc
);
718 if (chan
->state
== CPDMA_STATE_ACTIVE
&& chan
->rxfree
)
719 chan_write(chan
, rxfree
, 1);
724 spin_unlock_irqrestore(&chan
->lock
, flags
);
727 EXPORT_SYMBOL_GPL(cpdma_chan_submit
);
729 bool cpdma_check_free_tx_desc(struct cpdma_chan
*chan
)
734 struct cpdma_ctlr
*ctlr
= chan
->ctlr
;
735 struct cpdma_desc_pool
*pool
= ctlr
->pool
;
737 spin_lock_irqsave(&pool
->lock
, flags
);
739 index
= bitmap_find_next_zero_area(pool
->bitmap
,
740 pool
->num_desc
, pool
->num_desc
/2, 1, 0);
742 if (index
< pool
->num_desc
)
747 spin_unlock_irqrestore(&pool
->lock
, flags
);
750 EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc
);
752 static void __cpdma_chan_free(struct cpdma_chan
*chan
,
753 struct cpdma_desc __iomem
*desc
,
754 int outlen
, int status
)
756 struct cpdma_ctlr
*ctlr
= chan
->ctlr
;
757 struct cpdma_desc_pool
*pool
= ctlr
->pool
;
762 token
= (void *)desc_read(desc
, sw_token
);
763 buff_dma
= desc_read(desc
, sw_buffer
);
764 origlen
= desc_read(desc
, sw_len
);
766 dma_unmap_single(ctlr
->dev
, buff_dma
, origlen
, chan
->dir
);
767 cpdma_desc_free(pool
, desc
, 1);
768 (*chan
->handler
)(token
, outlen
, status
);
771 static int __cpdma_chan_process(struct cpdma_chan
*chan
)
773 struct cpdma_ctlr
*ctlr
= chan
->ctlr
;
774 struct cpdma_desc __iomem
*desc
;
777 struct cpdma_desc_pool
*pool
= ctlr
->pool
;
781 spin_lock_irqsave(&chan
->lock
, flags
);
785 chan
->stats
.empty_dequeue
++;
789 desc_dma
= desc_phys(pool
, desc
);
791 status
= __raw_readl(&desc
->hw_mode
);
792 outlen
= status
& 0x7ff;
793 if (status
& CPDMA_DESC_OWNER
) {
794 chan
->stats
.busy_dequeue
++;
799 if (status
& CPDMA_DESC_PASS_CRC
)
800 outlen
-= CPDMA_DESC_CRC_LEN
;
802 status
= status
& (CPDMA_DESC_EOQ
| CPDMA_DESC_TD_COMPLETE
|
803 CPDMA_DESC_PORT_MASK
);
805 chan
->head
= desc_from_phys(pool
, desc_read(desc
, hw_next
));
806 chan_write(chan
, cp
, desc_dma
);
808 chan
->stats
.good_dequeue
++;
810 if (status
& CPDMA_DESC_EOQ
) {
811 chan
->stats
.requeue
++;
812 chan_write(chan
, hdp
, desc_phys(pool
, chan
->head
));
815 spin_unlock_irqrestore(&chan
->lock
, flags
);
816 if (unlikely(status
& CPDMA_DESC_TD_COMPLETE
))
821 __cpdma_chan_free(chan
, desc
, outlen
, cb_status
);
825 spin_unlock_irqrestore(&chan
->lock
, flags
);
829 int cpdma_chan_process(struct cpdma_chan
*chan
, int quota
)
831 int used
= 0, ret
= 0;
833 if (chan
->state
!= CPDMA_STATE_ACTIVE
)
836 while (used
< quota
) {
837 ret
= __cpdma_chan_process(chan
);
844 EXPORT_SYMBOL_GPL(cpdma_chan_process
);
846 int cpdma_chan_start(struct cpdma_chan
*chan
)
848 struct cpdma_ctlr
*ctlr
= chan
->ctlr
;
849 struct cpdma_desc_pool
*pool
= ctlr
->pool
;
852 spin_lock_irqsave(&chan
->lock
, flags
);
853 if (chan
->state
!= CPDMA_STATE_IDLE
) {
854 spin_unlock_irqrestore(&chan
->lock
, flags
);
857 if (ctlr
->state
!= CPDMA_STATE_ACTIVE
) {
858 spin_unlock_irqrestore(&chan
->lock
, flags
);
861 dma_reg_write(ctlr
, chan
->int_set
, chan
->mask
);
862 chan
->state
= CPDMA_STATE_ACTIVE
;
864 chan_write(chan
, hdp
, desc_phys(pool
, chan
->head
));
866 chan_write(chan
, rxfree
, chan
->count
);
869 spin_unlock_irqrestore(&chan
->lock
, flags
);
872 EXPORT_SYMBOL_GPL(cpdma_chan_start
);
874 int cpdma_chan_stop(struct cpdma_chan
*chan
)
876 struct cpdma_ctlr
*ctlr
= chan
->ctlr
;
877 struct cpdma_desc_pool
*pool
= ctlr
->pool
;
882 spin_lock_irqsave(&chan
->lock
, flags
);
883 if (chan
->state
== CPDMA_STATE_TEARDOWN
) {
884 spin_unlock_irqrestore(&chan
->lock
, flags
);
888 chan
->state
= CPDMA_STATE_TEARDOWN
;
889 dma_reg_write(ctlr
, chan
->int_clear
, chan
->mask
);
891 /* trigger teardown */
892 dma_reg_write(ctlr
, chan
->td
, chan_linear(chan
));
894 /* wait for teardown complete */
895 timeout
= 100 * 100; /* 100 ms */
897 u32 cp
= chan_read(chan
, cp
);
898 if ((cp
& CPDMA_TEARDOWN_VALUE
) == CPDMA_TEARDOWN_VALUE
)
904 chan_write(chan
, cp
, CPDMA_TEARDOWN_VALUE
);
906 /* handle completed packets */
907 spin_unlock_irqrestore(&chan
->lock
, flags
);
909 ret
= __cpdma_chan_process(chan
);
912 } while ((ret
& CPDMA_DESC_TD_COMPLETE
) == 0);
913 spin_lock_irqsave(&chan
->lock
, flags
);
915 /* remaining packets haven't been tx/rx'ed, clean them up */
917 struct cpdma_desc __iomem
*desc
= chan
->head
;
920 next_dma
= desc_read(desc
, hw_next
);
921 chan
->head
= desc_from_phys(pool
, next_dma
);
923 chan
->stats
.teardown_dequeue
++;
925 /* issue callback without locks held */
926 spin_unlock_irqrestore(&chan
->lock
, flags
);
927 __cpdma_chan_free(chan
, desc
, 0, -ENOSYS
);
928 spin_lock_irqsave(&chan
->lock
, flags
);
931 chan
->state
= CPDMA_STATE_IDLE
;
932 spin_unlock_irqrestore(&chan
->lock
, flags
);
935 EXPORT_SYMBOL_GPL(cpdma_chan_stop
);
937 int cpdma_chan_int_ctrl(struct cpdma_chan
*chan
, bool enable
)
941 spin_lock_irqsave(&chan
->lock
, flags
);
942 if (chan
->state
!= CPDMA_STATE_ACTIVE
) {
943 spin_unlock_irqrestore(&chan
->lock
, flags
);
947 dma_reg_write(chan
->ctlr
, enable
? chan
->int_set
: chan
->int_clear
,
949 spin_unlock_irqrestore(&chan
->lock
, flags
);
954 struct cpdma_control_info
{
958 #define ACCESS_RO BIT(0)
959 #define ACCESS_WO BIT(1)
960 #define ACCESS_RW (ACCESS_RO | ACCESS_WO)
963 static struct cpdma_control_info controls
[] = {
964 [CPDMA_CMD_IDLE
] = {CPDMA_DMACONTROL
, 3, 1, ACCESS_WO
},
965 [CPDMA_COPY_ERROR_FRAMES
] = {CPDMA_DMACONTROL
, 4, 1, ACCESS_RW
},
966 [CPDMA_RX_OFF_LEN_UPDATE
] = {CPDMA_DMACONTROL
, 2, 1, ACCESS_RW
},
967 [CPDMA_RX_OWNERSHIP_FLIP
] = {CPDMA_DMACONTROL
, 1, 1, ACCESS_RW
},
968 [CPDMA_TX_PRIO_FIXED
] = {CPDMA_DMACONTROL
, 0, 1, ACCESS_RW
},
969 [CPDMA_STAT_IDLE
] = {CPDMA_DMASTATUS
, 31, 1, ACCESS_RO
},
970 [CPDMA_STAT_TX_ERR_CODE
] = {CPDMA_DMASTATUS
, 20, 0xf, ACCESS_RW
},
971 [CPDMA_STAT_TX_ERR_CHAN
] = {CPDMA_DMASTATUS
, 16, 0x7, ACCESS_RW
},
972 [CPDMA_STAT_RX_ERR_CODE
] = {CPDMA_DMASTATUS
, 12, 0xf, ACCESS_RW
},
973 [CPDMA_STAT_RX_ERR_CHAN
] = {CPDMA_DMASTATUS
, 8, 0x7, ACCESS_RW
},
974 [CPDMA_RX_BUFFER_OFFSET
] = {CPDMA_RXBUFFOFS
, 0, 0xffff, ACCESS_RW
},
977 int cpdma_control_get(struct cpdma_ctlr
*ctlr
, int control
)
980 struct cpdma_control_info
*info
= &controls
[control
];
983 spin_lock_irqsave(&ctlr
->lock
, flags
);
986 if (!ctlr
->params
.has_ext_regs
)
990 if (ctlr
->state
!= CPDMA_STATE_ACTIVE
)
994 if (control
< 0 || control
>= ARRAY_SIZE(controls
))
998 if ((info
->access
& ACCESS_RO
) != ACCESS_RO
)
1001 ret
= (dma_reg_read(ctlr
, info
->reg
) >> info
->shift
) & info
->mask
;
1004 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
1008 int cpdma_control_set(struct cpdma_ctlr
*ctlr
, int control
, int value
)
1010 unsigned long flags
;
1011 struct cpdma_control_info
*info
= &controls
[control
];
1015 spin_lock_irqsave(&ctlr
->lock
, flags
);
1018 if (!ctlr
->params
.has_ext_regs
)
1022 if (ctlr
->state
!= CPDMA_STATE_ACTIVE
)
1026 if (control
< 0 || control
>= ARRAY_SIZE(controls
))
1030 if ((info
->access
& ACCESS_WO
) != ACCESS_WO
)
1033 val
= dma_reg_read(ctlr
, info
->reg
);
1034 val
&= ~(info
->mask
<< info
->shift
);
1035 val
|= (value
& info
->mask
) << info
->shift
;
1036 dma_reg_write(ctlr
, info
->reg
, val
);
1040 spin_unlock_irqrestore(&ctlr
->lock
, flags
);
1043 EXPORT_SYMBOL_GPL(cpdma_control_set
);
1045 MODULE_LICENSE("GPL");