/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <bcmendian.h>
#ifdef BCMDBG
#define	DMA_ERROR(args)	if (!(*di->msg_level & 1)); else printf args
#define	DMA_TRACE(args)	if (!(*di->msg_level & 2)); else printf args
#else
#define	DMA_ERROR(args)
#define	DMA_TRACE(args)
#endif				/* BCMDBG */

#define	DMA_NONE(args)
#define d32txregs	dregs.d32_u.txregs_32
#define d32rxregs	dregs.d32_u.rxregs_32
#define txd32		dregs.d32_u.txd_32
#define rxd32		dregs.d32_u.rxd_32

#define d64txregs	dregs.d64_u.txregs_64
#define d64rxregs	dregs.d64_u.rxregs_64
#define txd64		dregs.d64_u.txd_64
#define rxd64		dregs.d64_u.rxd_64
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level;

#define	MAXNAMEL	8	/* 8 char names */

#define	DI_INFO(dmah)	((dma_info_t *)dmah)
/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
					 * which could be const
					 */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* callers name for diag msgs */

	void *osh;		/* os handle */
	si_t *sih;		/* sb handle */

	bool dma64;		/* this dma engine is operating in 64-bit mode */
	bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */

	union {
		struct {
			dma32regs_t *txregs_32;	/* 32-bit dma tx engine registers */
			dma32regs_t *rxregs_32;	/* 32-bit dma rx engine registers */
			dma32dd_t *txd_32;	/* pointer to dma32 tx descriptor ring */
			dma32dd_t *rxd_32;	/* pointer to dma32 rx descriptor ring */
		} d32_u;
		struct {
			dma64regs_t *txregs_64;	/* 64-bit dma tx engine registers */
			dma64regs_t *rxregs_64;	/* 64-bit dma rx engine registers */
			dma64dd_t *txd_64;	/* pointer to dma64 tx descriptor ring */
			dma64dd_t *rxd_64;	/* pointer to dma64 rx descriptor ring */
		} d64_u;
	} dregs;

	uint16 dmadesc_align;	/* alignment requirement for dma descriptors */

	uint16 ntxd;		/* # tx descriptors tunable */
	uint16 txin;		/* index of next descriptor to reclaim */
	uint16 txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	osldma_t *tx_dmah;	/* DMA TX descriptor ring handle */
	hnddma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t txdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t txdpaorig;	/* Original physical address of descriptor ring */
	uint16 txdalign;	/* #bytes added to alloc'd mem to align txd */
	uint32 txdalloc;	/* #bytes allocated for the ring */
	uint32 xmtptrbase;	/* When using unaligned descriptors, the ptr register
				 * is not just an index, it needs all 13 bits to be
				 * an offset from the addr register.
				 */

	uint16 nrxd;		/* # rx descriptors tunable */
	uint16 rxin;		/* index of next descriptor to reclaim */
	uint16 rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	osldma_t *rx_dmah;	/* DMA RX descriptor ring handle */
	hnddma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t rxdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t rxdpaorig;	/* Original physical address of descriptor ring */
	uint16 rxdalign;	/* #bytes added to alloc'd mem to align rxd */
	uint32 rxdalloc;	/* #bytes allocated for the ring */
	uint32 rcvptrbase;	/* Base for ptr reg when using unaligned descriptors */

	uint16 rxbufsize;	/* rx buffer size in bytes,
				 * not including the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper stack
				 *  e.g. some rx pkt buffers will be bridged to tx side
				 *  without byte copying. The extra headroom needs to be
				 *  large enough to fit txheader needs.
				 *  Some dongle driver may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	uint rxoffset;		/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/*   high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/*   high 32 bits */
	bool aligndesc_4k;	/* descriptor base needs to be aligned or not */
} dma_info_t;
/*
 * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines.
 * Otherwise it will support only 64-bit.
 *
 * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines.
 * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines.
 *
 * DMA64_MODE indicates whether the current DMA engine is running as 64-bit.
 */
#ifdef BCMDMA32
#define	DMA32_ENAB(di)	1
#define	DMA64_ENAB(di)	1
#define DMA64_MODE(di)	((di)->dma64)
#else				/* !BCMDMA32 */
#define	DMA32_ENAB(di)	0
#define	DMA64_ENAB(di)	1
#define DMA64_MODE(di)	1
#endif				/* !BCMDMA32 */
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB TRUE
#else
#define DMASGLIST_ENAB FALSE
#endif				/* BCMDMASGLISTOSL */
/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD((i) + 1)
#define	PREVTXD(i)	TXD((i) - 1)
#define	NEXTRXD(i)	RXD((i) + 1)
#define	PREVRXD(i)	RXD((i) - 1)

#define	NTXDACTIVE(h, t)	TXD((t) - (h))
#define	NRXDACTIVE(h, t)	RXD((t) - (h))

/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))
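
/*
 * Illustrative sketch (not part of the driver): how the ring macros
 * compose.  Assume ntxd == 8, di->txin == 6 and di->txout == 1: the
 * ring has wrapped, and NTXDACTIVE(6, 1) == (1 - 6) & 7 == 3
 * descriptors are still owned by hardware.  The bitmask form is why
 * dma_attach() asserts ISPOWEROF2(ntxd) and ISPOWEROF2(nrxd).
 */
#if 0
	uint16 active = NTXDACTIVE(di->txin, di->txout);	/* 3 */
	uint32 byte_off = I2B(di->txout, dma64dd_t);	/* index -> byte offset */
	uint16 idx = B2I(byte_off, dma64dd_t);	/* and back again */
#endif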
#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30	/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31	/* address[63] */
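
/*
 * Illustrative sketch: peeling the address-extension (AE) bits off a
 * 32-bit physical address, exactly as the dd_upd/ddtable_init routines
 * below do.  The value 0xd0001000 is a made-up example address.
 */
#if 0
	uint32 pa32 = 0xd0001000;
	uint32 ae = (pa32 & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;	/* == 3 */
	pa32 &= ~PCI32ADDR_HIGH;	/* 0x10001000 goes in the descriptor */
#endif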
/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, uint16 *rxoffset,
			      uint16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static void *_dma_peeknextrxp(dma_info_t *di);
static uintptr _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static uint8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(osl_t *osh, uint32 boundary, uint size,
			   uint16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa, osldma_t **dmah);
/* Prototypes for 32-bit routines */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
static void dma32_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);

static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);

STATIC INLINE uint32 parity32(uint32 data);
const di_fcn_t dma64proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma64_txinit,
	(di_txreset_t) dma64_txreset,
	(di_txenabled_t) dma64_txenabled,
	(di_txsuspend_t) dma64_txsuspend,
	(di_txresume_t) dma64_txresume,
	(di_txsuspended_t) dma64_txsuspended,
	(di_txsuspendedidle_t) dma64_txsuspendedidle,
	(di_txfast_t) dma64_txfast,
	(di_txunframed_t) dma64_txunframed,
	(di_getpos_t) dma64_getpos,
	(di_txstopped_t) dma64_txstopped,
	(di_txreclaim_t) dma64_txreclaim,
	(di_getnexttxp_t) dma64_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma64_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma64_rxreset,
	(di_rxidle_t) dma64_rxidle,
	(di_rxstopped_t) dma64_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma64_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,
	NULL,
	NULL,
	NULL,
	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
	39
};
static const di_fcn_t dma32proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma32_txinit,
	(di_txreset_t) dma32_txreset,
	(di_txenabled_t) dma32_txenabled,
	(di_txsuspend_t) dma32_txsuspend,
	(di_txresume_t) dma32_txresume,
	(di_txsuspended_t) dma32_txsuspended,
	(di_txsuspendedidle_t) dma32_txsuspendedidle,
	(di_txfast_t) dma32_txfast,
	(di_txunframed_t) NULL,
	(di_getpos_t) NULL,
	(di_txstopped_t) dma32_txstopped,
	(di_txreclaim_t) dma32_txreclaim,
	(di_getnexttxp_t) dma32_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma32_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma32_rxreset,
	(di_rxidle_t) dma32_rxidle,
	(di_rxstopped_t) dma32_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma32_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,
	NULL,
	NULL,
	NULL,
	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
	39
};
hnddma_t *dma_attach(osl_t *osh, char *name, si_t *sih, void *dmaregstx,
		     void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
		     int rxextheadroom, uint nrxpost, uint rxoffset,
		     uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	di = MALLOC(osh, sizeof(dma_info_t));
	if (di == NULL) {
#ifdef BCMDBG
		printf("dma_attach: out of memory, malloced %d bytes\n",
		       MALLOCED(osh));
#endif
		return NULL;
	}

	bzero((char *)di, sizeof(dma_info_t));

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb is no longer supported */
	ASSERT(sih != NULL);

	di->dma64 =
	    ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));

	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		ASSERT(ntxd <= D64MAXDD);
		ASSERT(nrxd <= D64MAXDD);
		di->d64txregs = (dma64regs_t *) dmaregstx;
		di->d64rxregs = (dma64regs_t *) dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
	} else if (DMA32_ENAB(di)) {
		ASSERT(ntxd <= D32MAXDD);
		ASSERT(nrxd <= D32MAXDD);
		di->d32txregs = (dma32regs_t *) dmaregstx;
		di->d32rxregs = (dma32regs_t *) dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
	} else {
		DMA_ERROR(("dma_attach: driver doesn't support 32-bit DMA\n"));
		ASSERT(0);
		goto fail;
	}

	/* Default flags (which can be changed by the driver calling dma_ctrlflags
	 * before enable): For backwards compatibility both Rx Overflow Continue
	 * and Parity are DISABLED.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
				    0);

	DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d " "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", name, (DMA64_MODE(di) ? "DMA64" : "DMA32"), osh, di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->osh = osh;
	di->sih = sih;

	/* save tunables */
	di->ntxd = (uint16) ntxd;
	di->nrxd = (uint16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (uint16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (uint16) rxbufsize;

	di->nrxpost = (uint16) nrxpost;
	di->rxoffset = (uint8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		if ((sih->buscoretype == PCIE_CORE_ID) && DMA64_MODE(di)) {
			/* pcie with DMA64 */
			di->ddoffsetlow = 0;
			di->ddoffsethigh = SI_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			di->ddoffsetlow = SI_PCI_DMA;
			di->ddoffsethigh = 0;
		}
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((si_coreid(sih) == SDIOD_CORE_ID)
	    && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((si_coreid(sih) == I2S_CORE_ID) &&
		 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);

	/* do the descriptors need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		if (DMA64_MODE(di)) {
			di->dmadesc_align = D64RINGALIGN_BITS;
			if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
				/* for smaller dd table, HW relax the alignment requirement */
				di->dmadesc_align = D64RINGALIGN_BITS - 1;
			}
		} else
			di->dmadesc_align = D32RINGALIGN_BITS;
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = MALLOC(osh, size);
		if (di->txp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->txp, size);
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = MALLOC(osh, size);
		if (di->rxp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->rxp, size);
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (uint32) PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (uint32) PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));

	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			di->txp_dmah = (hnddma_seg_map_t *) MALLOC(osh, size);
			if (di->txp_dmah == NULL)
				goto fail;
			bzero((char *)di->txp_dmah, size);
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			di->rxp_dmah = (hnddma_seg_map_t *) MALLOC(osh, size);
			if (di->rxp_dmah == NULL)
				goto fail;
			bzero((char *)di->rxp_dmah, size);
		}
	}

	return (hnddma_t *) di;

 fail:
	_dma_detach(di);
	return NULL;
}
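
/*
 * Illustrative sketch (hypothetical caller): bringing up one tx/rx
 * engine pair with dma_attach().  "regs", "wl_msg_level" and the
 * tunables are placeholders chosen for the example, not values defined
 * in this file; error handling is elided.
 */
#if 0
	hnddma_t *dma;

	dma = dma_attach(osh, "wl0", sih,
			 (void *)&regs->dmaregs[0].xmt,	/* tx engine regs */
			 (void *)&regs->dmaregs[0].rcv,	/* rx engine regs */
			 64, 64,	/* ntxd, nrxd: must be powers of 2 */
			 2048,		/* rxbufsize */
			 -1,		/* rxextheadroom: -1 == BCMEXTRAHDROOM */
			 32,		/* nrxpost */
			 0,		/* rxoffset */
			 &wl_msg_level);
#endif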
/* init the tx or rx descriptor */
static INLINE void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx,
	     uint32 *flags, uint32 bufcount)
{
	/* dma32 uses 32-bit control to fit both flags and bufcounter */
	*flags = *flags | (bufcount & CTRL_BC_MASK);

	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
		W_SM(&ddring[outidx].addr,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	} else {
		/* address extension */
		uint32 ae;
		ASSERT(di->addrext);
		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

		*flags |= (ae << CTRL_AE_SHIFT);
		W_SM(&ddring[outidx].addr,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	}
}
/* Check for odd number of 1's */
STATIC INLINE uint32 parity32(uint32 data)
{
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return data & 1;
}

#define DMA64_DD_PARITY(dd)  parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
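
/*
 * Illustrative note: the xor fold in parity32() halves the span each
 * step, so bit 0 ends up as the XOR of all 32 input bits.  e.g.
 * parity32(0xb) == 1 (three set bits), parity32(0x3) == 0.
 */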
static INLINE void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
	     uint32 *flags, uint32 bufcount)
{
	uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
	if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
	    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
		ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension for 32-bit PCI */
		uint32 ae;
		ASSERT(di->addrext);

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
		ASSERT(PHYSADDRHI(pa) == 0);

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
	if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
		if (DMA64_DD_PARITY(&ddring[outidx])) {
			W_SM(&ddring[outidx].ctrl2,
			     BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
		}
	}
}
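
/*
 * Illustrative note (made-up values): for pa == 0x1fe00000 with
 * dataoffsetlow == 0, bufcount == 2048 and *flags == D64_CTRL1_SOF,
 * the fast path above writes addrlow = 0x1fe00000, addrhigh = 0,
 * ctrl1 = SOF and ctrl2 = 2048; the parity bit is folded into ctrl2
 * afterwards only when DMA_CTRL_PEN is enabled.
 */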
static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
{
	uint32 w;

	OR_REG(osh, &dma32regs->control, XC_AE);
	w = R_REG(osh, &dma32regs->control);
	AND_REG(osh, &dma32regs->control, ~XC_AE);
	return (w & XC_AE) == XC_AE;
}
static bool _dma_alloc(dma_info_t *di, uint direction)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		return dma64_alloc(di, direction);
	} else if (DMA32_ENAB(di)) {
		return dma32_alloc(di, direction);
	} else
		ASSERT(0);
	return FALSE;
}
680 static void _dma_detach(dma_info_t
*di
)
683 DMA_TRACE(("%s: dma_detach\n", di
->name
));
685 /* shouldn't be here if descriptors are unreclaimed */
686 ASSERT(di
->txin
== di
->txout
);
687 ASSERT(di
->rxin
== di
->rxout
);
689 /* free dma descriptor rings */
690 if (DMA64_ENAB(di
) && DMA64_MODE(di
)) {
692 DMA_FREE_CONSISTENT(di
->osh
,
693 ((int8
*) (uintptr
) di
->txd64
-
694 di
->txdalign
), di
->txdalloc
,
695 (di
->txdpaorig
), &di
->tx_dmah
);
697 DMA_FREE_CONSISTENT(di
->osh
,
698 ((int8
*) (uintptr
) di
->rxd64
-
699 di
->rxdalign
), di
->rxdalloc
,
700 (di
->rxdpaorig
), &di
->rx_dmah
);
701 } else if (DMA32_ENAB(di
)) {
703 DMA_FREE_CONSISTENT(di
->osh
,
704 ((int8
*) (uintptr
) di
->txd32
-
705 di
->txdalign
), di
->txdalloc
,
706 (di
->txdpaorig
), &di
->tx_dmah
);
708 DMA_FREE_CONSISTENT(di
->osh
,
709 ((int8
*) (uintptr
) di
->rxd32
-
710 di
->rxdalign
), di
->rxdalloc
,
711 (di
->rxdpaorig
), &di
->rx_dmah
);
715 /* free packet pointer vectors */
717 MFREE(di
->osh
, (void *)di
->txp
, (di
->ntxd
* sizeof(void *)));
719 MFREE(di
->osh
, (void *)di
->rxp
, (di
->nrxd
* sizeof(void *)));
721 /* free tx packet DMA handles */
723 MFREE(di
->osh
, (void *)di
->txp_dmah
,
724 di
->ntxd
* sizeof(hnddma_seg_map_t
));
726 /* free rx packet DMA handles */
728 MFREE(di
->osh
, (void *)di
->rxp_dmah
,
729 di
->nrxd
* sizeof(hnddma_seg_map_t
));
731 /* free our private info structure */
732 MFREE(di
->osh
, (void *)di
, sizeof(dma_info_t
));
static bool _dma_descriptor_align(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		uint32 addrl;

		/* Check to see if the descriptors need to be aligned on 4K/8K or not */
		if (di->d64txregs != NULL) {
			W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64txregs->addrlow);
			if (addrl != 0)
				return FALSE;
		} else if (di->d64rxregs != NULL) {
			W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
			if (addrl != 0)
				return FALSE;
		}
	}
	return TRUE;
}
/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
static bool _dma_isaddrext(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

		/* not all tx or rx channel are available */
		if (di->d64txregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64txregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di->name));
				ASSERT(0);
			}
			return TRUE;
		} else if (di->d64rxregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64rxregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di->name));
				ASSERT(0);
			}
			return TRUE;
		}
		return FALSE;
	} else if (DMA32_ENAB(di)) {
		if (di->d32txregs)
			return _dma32_addrext(di->osh, di->d32txregs);
		else if (di->d32rxregs)
			return _dma32_addrext(di->osh, di->d32rxregs);
	} else
		ASSERT(0);

	return FALSE;
}
/* initialize descriptor table base address */
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		if (!di->aligndesc_4k) {
			if (direction == DMA_TX)
				di->xmtptrbase = PHYSADDRLO(pa);
			else
				di->rcvptrbase = PHYSADDRLO(pa);
		}

		if ((di->ddoffsetlow == 0)
		    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh,
				      (PHYSADDRHI(pa) + di->ddoffsethigh));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh,
				      (PHYSADDRHI(pa) + di->ddoffsethigh));
			}
		} else {
			/* DMA64 32bits address extension */
			uint32 ae;
			ASSERT(di->addrext);
			ASSERT(PHYSADDRHI(pa) == 0);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
			    PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh,
				      di->ddoffsethigh);
				SET_REG(di->osh, &di->d64txregs->control,
					D64_XC_AE, (ae << D64_XC_AE_SHIFT));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh,
				      di->ddoffsethigh);
				SET_REG(di->osh, &di->d64rxregs->control,
					D64_RC_AE, (ae << D64_RC_AE_SHIFT));
			}
		}
	} else if (DMA32_ENAB(di)) {
		ASSERT(PHYSADDRHI(pa) == 0);
		if ((di->ddoffsetlow == 0)
		    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX)
				W_REG(di->osh, &di->d32txregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
			else
				W_REG(di->osh, &di->d32rxregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
		} else {
			/* dma32 address extension */
			uint32 ae;
			ASSERT(di->addrext);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
			    PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d32txregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				SET_REG(di->osh, &di->d32txregs->control, XC_AE,
					ae << XC_AE_SHIFT);
			} else {
				W_REG(di->osh, &di->d32rxregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				SET_REG(di->osh, &di->d32rxregs->control, RC_AE,
					ae << RC_AE_SHIFT);
			}
		}
	} else
		ASSERT(0);
}
static void _dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di))
		OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
	else if (DMA32_ENAB(di))
		OR_REG(di->osh, &di->d32txregs->control, XC_LE);
	else
		ASSERT(0);
}
static void _dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		BZERO_SM((void *)(uintptr) di->rxd64,
			 (di->nrxd * sizeof(dma64dd_t)));

		/* DMA engine without alignment requirement requires table to be inited
		 * before enabling the engine
		 */
		if (!di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);

		_dma_rxenable(di);

		if (di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else if (DMA32_ENAB(di)) {
		BZERO_SM((void *)(uintptr) di->rxd32,
			 (di->nrxd * sizeof(dma32dd_t)));
		_dma_rxenable(di);
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else
		ASSERT(0);
}
static void _dma_rxenable(dma_info_t *di)
{
	uint dmactrlflags = di->hnddma.dmactrlflags;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		uint32 control =
		    (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) |
		    D64_RC_RE;

		if ((dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= D64_RC_OC;

		W_REG(di->osh, &di->d64rxregs->control,
		      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
	} else if (DMA32_ENAB(di)) {
		uint32 control =
		    (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;

		if ((dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= RC_OC;

		W_REG(di->osh, &di->d32rxregs->control,
		      ((di->rxoffset << RC_RO_SHIFT) | control));
	} else
		ASSERT(0);
}
static void
_dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize)
{
	/* the normal values fit into 16 bits */
	*rxoffset = (uint16) di->rxoffset;
	*rxbufsize = (uint16) di->rxbufsize;
}
/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 *   if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is supported
 *      with pkts chain
 *   otherwise, it's treated as giant pkt and will be tossed.
 *   The DMA scattering starts with normal DMA header, followed by first buffer data.
 *   After it reaches the max size of buffer, the data continues in next DMA descriptor
 *   buffer WITHOUT DMA header
 */
static void *BCMFASTPATH _dma_rx(dma_info_t *di)
{
	void *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;

 next_frame:
	head = _dma_getnextrxp(di, FALSE);
	if (head == NULL)
		return NULL;

	len = ltoh16(*(uint16 *) (PKTDATA(head)));
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

#if defined(__mips__)
	if (!len) {
		while (!(len = *(uint16 *) OSL_UNCACHED(PKTDATA(head))))
			OSL_DELAY(1);

		*(uint16 *) PKTDATA(head) = htol16((uint16) len);
	}
#endif				/* defined(__mips__) */

	/* set actual length */
	pkt_len = MIN((di->rxoffset + len), di->rxbufsize);
	PKTSETLEN(head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, FALSE))) {
			PKTSETNEXT(tail, p);
			pkt_len = MIN(resid, (int)di->rxbufsize);
			PKTSETLEN(p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

#ifdef BCMDBG
		if (resid > 0) {
			uint cur;
			ASSERT(p == NULL);
			cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
			    B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				dma64dd_t) : B2I(R_REG(di->osh,
						       &di->d32rxregs->
						       status) & RS_CD_MASK,
						 dma32dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
				   di->rxin, di->rxout, cur));
		}
#endif				/* BCMDBG */

		if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
				   di->name, len));
			PKTFREE(di->osh, head, FALSE);
			di->hnddma.rxgiants++;
			goto next_frame;
		}
	}

	return head;
}
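
/*
 * Illustrative sketch (hypothetical caller): _dma_rx() and
 * _dma_rxfill() are normally paired through the wrappers exported via
 * the di_fn table (the dma_rx()/dma_rxfill() macros in hnddma.h).
 * "dma" is a hnddma_t * from dma_attach(); process_frame() is a
 * placeholder for the caller's frame handler.
 */
#if 0
	void *frame;

	while ((frame = dma_rx(dma)) != NULL)
		process_frame(frame);
	dma_rxfill(dma);	/* repost the buffers just consumed */
#endif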
/* post receive buffers
 *  return FALSE if refill failed completely and ring is empty
 *  this will stall the rx dma and user might want to call rxfill again asap
 *  This unlikely happens on memory-rich NIC, but often on memory-constrained dongle
 */
static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
{
	void *p;
	uint16 rxin, rxout;
	uint32 flags = 0;
	uint n;
	uint i;
	dmaaddr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = FALSE;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
		   size to be allocated
		 */

		p = osl_pktget(di->osh, di->rxbufsize + extra_offset);

		if (p == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
				   di->name));
			if (i == 0) {
				if (DMA64_ENAB(di) && DMA64_MODE(di)) {
					if (dma64_rxidle(di)) {
						DMA_ERROR(("%s: rxfill64: ring is empty !\n", di->name));
						ring_empty = TRUE;
					}
				} else if (DMA32_ENAB(di)) {
					if (dma32_rxidle(di)) {
						DMA_ERROR(("%s: rxfill32: ring is empty !\n", di->name));
						ring_empty = TRUE;
					}
				} else
					ASSERT(0);
			}
			di->hnddma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			PKTPULL(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(uint32 *) (PKTDATA(p)) = 0;

		if (DMASGLIST_ENAB)
			bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, PKTDATA(p),
			     di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);

		ASSERT(ISALIGNED(PHYSADDRLO(pa), 4));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			if (rxout == (di->nrxd - 1))
				flags = D64_CTRL1_EOT;

			dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
				     di->rxbufsize);
		} else if (DMA32_ENAB(di)) {
			if (rxout == (di->nrxd - 1))
				flags = CTRL_EOT;

			ASSERT(PHYSADDRHI(pa) == 0);
			dma32_dd_upd(di, di->rxd32, pa, rxout, &flags,
				     di->rxbufsize);
		} else
			ASSERT(0);

		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		W_REG(di->osh, &di->d64rxregs->ptr,
		      di->rcvptrbase + I2B(rxout, dma64dd_t));
	} else if (DMA32_ENAB(di)) {
		W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
	} else
		ASSERT(0);

	return ring_empty;
}
/* like getnexttxp but no reclaim */
static void *_dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	if (di->ntxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		end =
		    B2I(((R_REG(di->osh, &di->d64txregs->status0) &
			  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
			dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		end =
		    B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
			dma32dd_t);
	} else
		ASSERT(0);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return di->txp[i];

	return NULL;
}
/* like getnextrxp but does not take it off the ring */
static void *_dma_peeknextrxp(dma_info_t *di)
{
	uint end, i;

	if (di->nrxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		end =
		    B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
			  D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
			dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		end =
		    B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
			dma32dd_t);
	} else
		ASSERT(0);

	for (i = di->rxin; i != end; i = NEXTRXD(i))
		if (di->rxp[i])
			return di->rxp[i];

	return NULL;
}
static void _dma_rxreclaim(dma_info_t *di)
{
	void *p;

	/* "unused local" warning suppression for OSLs that
	 * define PKTFREE() without using the di->osh arg
	 */
	di = di;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, TRUE)))
		PKTFREE(di->osh, p, FALSE);
}
static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		return dma64_getnextrxp(di, forceall);
	} else if (DMA32_ENAB(di)) {
		return dma32_getnextrxp(di, forceall);
	} else
		ASSERT(0);
	return NULL;
}
static void _dma_txblock(dma_info_t *di)
{
	di->hnddma.txavail = 0;
}

static void _dma_txunblock(dma_info_t *di)
{
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint _dma_txactive(dma_info_t *di)
{
	return NTXDACTIVE(di->txin, di->txout);
}
static uint _dma_txpending(dma_info_t *di)
{
	uint curr;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		curr =
		    B2I(((R_REG(di->osh, &di->d64txregs->status0) &
			  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
			dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		curr =
		    B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
			dma32dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(curr, di->txout);
}
static uint _dma_txcommitted(dma_info_t *di)
{
	uint ptr;
	uint txin = di->txin;

	if (txin == di->txout)
		return 0;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(di->txin, ptr);
}
static uint _dma_rxactive(dma_info_t *di)
{
	return NRXDACTIVE(di->rxin, di->rxout);
}
static void _dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->hnddma.rxgiants = 0;
	di->hnddma.rxnobuf = 0;
	di->hnddma.txnobuf = 0;
}
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
	uint dmactrlflags = di->hnddma.dmactrlflags;

	if (di == NULL) {
		DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name));
		return 0;
	}

	ASSERT((flags & ~mask) == 0);

	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		uint32 control;

		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			control = R_REG(di->osh, &di->d64txregs->control);
			W_REG(di->osh, &di->d64txregs->control,
			      control | D64_XC_PD);
			if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
				/* We *can* disable it so it is supported,
				 * restore control register
				 */
				W_REG(di->osh, &di->d64txregs->control,
				      control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		} else if (DMA32_ENAB(di)) {
			control = R_REG(di->osh, &di->d32txregs->control);
			W_REG(di->osh, &di->d32txregs->control,
			      control | XC_PD);
			if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
				W_REG(di->osh, &di->d32txregs->control,
				      control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		} else
			ASSERT(0);
	}

	di->hnddma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}
/* get the address of the var in order to change later */
static uintptr _dma_getvar(dma_info_t *di, const char *name)
{
	if (!strcmp(name, "&txavail"))
		return (uintptr) &(di->hnddma.txavail);
	else {
		ASSERT(0);
	}
	return 0;
}
void dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
{
	OR_REG(osh, &regs->control, XC_LE);
}
uint8 dma_align_sizetobits(uint size)
{
	uint8 bitpos = 0;
	ASSERT(size);
	ASSERT(!(size & (size - 1)));
	while (size >>= 1) {
		bitpos++;
	}
	return bitpos;
}
/* This function ensures that the DMA descriptor ring will not get allocated
 * across a page boundary. If the allocation is done across the page boundary
 * the first time, then it is freed and the allocation is redone at a
 * descriptor-ring-size aligned location. This ensures that the ring cannot
 * cross a page boundary.
 */
static void *dma_ringalloc(osl_t *osh, uint32 boundary, uint size,
			   uint16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa, osldma_t **dmah)
{
	void *va;
	uint32 desc_strtaddr;
	uint32 alignbytes = 1 << *alignbits;

	va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa,
				  dmah);
	if (NULL == va)
		return NULL;

	desc_strtaddr = (uint32) ROUNDUP((uintptr) va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		DMA_FREE_CONSISTENT(osh, va, size, *descpa, dmah);
		va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced,
					  descpa, dmah);
	}
	return va;
}
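
/*
 * Illustrative note (made-up numbers): with boundary == 0x1000 and a
 * 0x200-byte ring, an allocation whose aligned start is 0x00100ff0
 * fails the test above, because bit 12 of the first and last byte
 * addresses differ (0x00100ff0 vs 0x001011ef), i.e. the ring would
 * straddle the 4 KB region.  It is then re-allocated with
 * dma_align_sizetobits(0x200) == 9, aligning the ring to its own size
 * so it can no longer cross the boundary.
 */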
/* 32-bit DMA functions */

static void dma32_txinit(dma_info_t *di)
{
	uint32 control = XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM((void *)(uintptr) di->txd32, (di->ntxd * sizeof(dma32dd_t)));

	if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= XC_PD;
	W_REG(di->osh, &di->d32txregs->control, control);
	_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool dma32_txenabled(dma_info_t *di)
{
	uint32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d32txregs->control);
	return (xc != 0xffffffff) && (xc & XC_XE);
}
static void dma32_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d32txregs->control, XC_SE);
}

static void dma32_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
}

static bool dma32_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0)
	    || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}
static void dma32_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transfered")));

	if (di->txin == di->txout)
		return;

	while ((p = dma32_getnexttxp(di, range)))
		PKTFREE(di->osh, p, TRUE);
}
static bool dma32_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
		XS_XS_STOPPED);
}

static bool dma32_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
		RS_RS_STOPPED);
}
static bool dma32_alloc(dma_info_t *di, uint direction)
{
	uint size;
	uint ddlen;
	void *va;
	uint alloced;
	uint16 align;
	uint16 align_bits;

	ddlen = sizeof(dma32dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

	alloced = 0;
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig, &di->tx_dmah);
		if (va == NULL) {
			DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
			return FALSE;
		}

		PHYSADDRHISET(di->txdpa, 0);
		ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
		di->txd32 = (dma32dd_t *) ROUNDUP((uintptr) va, align);
		di->txdalign =
		    (uint) ((int8 *) (uintptr) di->txd32 - (int8 *) va);

		PHYSADDRLOSET(di->txdpa,
			      PHYSADDRLO(di->txdpaorig) + di->txdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

		di->txdalloc = alloced;
		ASSERT(ISALIGNED((uintptr) di->txd32, align));
	} else {
		va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig, &di->rx_dmah);
		if (va == NULL) {
			DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
			return FALSE;
		}

		PHYSADDRHISET(di->rxdpa, 0);
		ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
		di->rxd32 = (dma32dd_t *) ROUNDUP((uintptr) va, align);
		di->rxdalign =
		    (uint) ((int8 *) (uintptr) di->rxd32 - (int8 *) va);

		PHYSADDRLOSET(di->rxdpa,
			      PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
		di->rxdalloc = alloced;
		ASSERT(ISALIGNED((uintptr) di->rxd32, align));
	}

	return TRUE;
}
static bool dma32_txreset(dma_info_t *di)
{
	uint32 status;

	if (di->ntxd == 0)
		return TRUE;

	/* suspend tx DMA first */
	W_REG(di->osh, &di->d32txregs->control, XC_SE);
	SPINWAIT(((status =
		   (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
		  != XS_XS_DISABLED) && (status != XS_XS_IDLE)
		 && (status != XS_XS_STOPPED), (10000));

	W_REG(di->osh, &di->d32txregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
				   &di->d32txregs->status) & XS_XS_MASK)) !=
		  XS_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	OSL_DELAY(300);

	return status == XS_XS_DISABLED;
}
static bool dma32_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return TRUE;

	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
		R_REG(di->osh, &di->d32rxregs->ptr));
}
static bool dma32_rxreset(dma_info_t *di)
{
	uint32 status;

	if (di->nrxd == 0)
		return TRUE;

	W_REG(di->osh, &di->d32rxregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
				   &di->d32rxregs->status) & RS_RS_MASK)) !=
		  RS_RS_DISABLED), 10000);

	return status == RS_RS_DISABLED;
}
static bool dma32_rxenabled(dma_info_t *di)
{
	uint32 rc;

	rc = R_REG(di->osh, &di->d32rxregs->control);
	return (rc != 0xffffffff) && (rc & RC_RE);
}
static bool dma32_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return TRUE;

	if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
		return 0;

	if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
		return 0;

	OSL_DELAY(2);
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
		XS_XS_IDLE);
}
/* !! tx entry routine
 * supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 *
 * WARNING: the caller must check the return value for error.
 *   the error (tossed frames) could be fatal and cause many subsequent hard to debug problems
 */
static int dma32_txfast(dma_info_t *di, void *p0, bool commit)
{
	void *p, *next;
	uchar *data;
	uint len;
	uint16 txout;
	uint32 flags = 0;
	dmaaddr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j;
		hnddma_seg_map_t *map;

		data = PKTDATA(p);
		len = PKTLEN(p);
#ifdef BCM_DMAPAD
		len += PKTDMAPAD(di->osh, p);
#endif
		next = PKTNEXT(p);

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		if (DMASGLIST_ENAB)
			bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

		/* get physical address of buffer start */
		pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
			     &di->txp_dmah[txout]);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs >
			    (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
				    1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= CTRL_SOF;

			/* With a DMA segment list, Descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
			 * end of segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (CTRL_IOC | CTRL_EOF);
			if (txout == (di->ntxd - 1))
				flags |= CTRL_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
			}
			ASSERT(PHYSADDRHI(pa) == 0);

			dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & CTRL_EOF))
		W_SM(&di->txd32[PREVTXD(txout)].ctrl,
		     BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
	PKTFREE(di->osh, p0, TRUE);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
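
/*
 * Illustrative sketch (hypothetical caller): the return value of the
 * txfast routines must be checked; on failure the frame has already
 * been freed and counted in txnobuf.  dma_txfast() is the wrapper
 * macro from hnddma.h; "dma" and "p" are placeholders.
 */
#if 0
	if (dma_txfast(dma, p, TRUE))	/* TRUE: ring the doorbell now */
		;	/* frame was tossed; back off and retry later */
#endif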
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transfered by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range)
{
	uint16 start, end, i;
	uint16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transfered")));

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		dma32regs_t *dregs = di->d32txregs;

		end =
		    (uint16) B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK,
				 dma32dd_t);

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc =
			    (uint16) ((R_REG(di->osh, &dregs->status) &
				       XS_AD_MASK) >> XS_AD_SHIFT);
			active_desc = (uint16) B2I(active_desc, dma32dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa,
			      (BUS_SWAP32(R_SM(&di->txd32[i].addr)) -
			       di->dataoffsetlow));
		PHYSADDRHISET(pa, 0);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size =
			    (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) &
			     CTRL_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd32[i].addr, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return txp;

 bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
	return NULL;
}
static void *dma32_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i, curr;
	void *rxp;
	dmaaddr_t pa;
	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma32_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) -
		       di->dataoffsetlow));
	PHYSADDRHISET(pa, 0);

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

	W_SM(&di->rxd32[i].addr, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return rxp;
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void dma32_txrotate(dma_info_t *di)
{
	uint16 ad;
	uint nactive;
	uint rot;
	uint16 old, new;
	uint32 w;
	uint16 first, last;

	ASSERT(dma32_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = (uint16) (B2I
		       (((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK)
			 >> XS_AD_SHIFT), dma32dd_t));
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
		if (new == (di->ntxd - 1))
			w |= CTRL_EOT;
		W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
		W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		/* Move the segment map as well */
		if (DMASGLIST_ENAB) {
			bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
			      sizeof(hnddma_seg_map_t));
			bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
}
1974 static void dma64_txinit(dma_info_t
*di
)
1976 uint32 control
= D64_XC_XE
;
1978 DMA_TRACE(("%s: dma_txinit\n", di
->name
));
1983 di
->txin
= di
->txout
= 0;
1984 di
->hnddma
.txavail
= di
->ntxd
- 1;
1986 /* clear tx descriptor ring */
1987 BZERO_SM((void *)(uintptr
) di
->txd64
, (di
->ntxd
* sizeof(dma64dd_t
)));
1989 /* DMA engine with out alignment requirement requires table to be inited
1990 * before enabling the engine
1992 if (!di
->aligndesc_4k
)
1993 _dma_ddtable_init(di
, DMA_TX
, di
->txdpa
);
1995 if ((di
->hnddma
.dmactrlflags
& DMA_CTRL_PEN
) == 0)
1996 control
|= D64_XC_PD
;
1997 OR_REG(di
->osh
, &di
->d64txregs
->control
, control
);
1999 /* DMA engine with alignment requirement requires table to be inited
2000 * before enabling the engine
2002 if (di
->aligndesc_4k
)
2003 _dma_ddtable_init(di
, DMA_TX
, di
->txdpa
);
static bool dma64_txenabled(dma_info_t *di)
{
	uint32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d64txregs->control);
	return (xc != 0xffffffff) && (xc & D64_XC_XE);
}

static void dma64_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
}

static void dma64_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
}

static bool dma64_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) ||
	    ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) ==
	     D64_XC_SE);
}
static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transfered")));

	if (di->txin == di->txout)
		return;

	while ((p = dma64_getnexttxp(di, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
			PKTFREE(di->osh, p, TRUE);
	}
}
static bool dma64_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
		D64_XS0_XS_STOPPED);
}

static bool dma64_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
		D64_RS0_RS_STOPPED);
}
static bool dma64_alloc(dma_info_t *di, uint direction)
{
	uint size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	uint16 align;
	uint16 align_bits;

	ddlen = sizeof(dma64dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig, &di->tx_dmah);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
			return FALSE;
		}
		align = (1 << align_bits);
		di->txd64 = (dma64dd_t *) ROUNDUP((uintptr) va, align);
		di->txdalign =
		    (uint) ((int8 *) (uintptr) di->txd64 - (int8 *) va);
		PHYSADDRLOSET(di->txdpa,
			      PHYSADDRLO(di->txdpaorig) + di->txdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

		PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
		di->txdalloc = alloced;
		ASSERT(ISALIGNED((uintptr) di->txd64, align));
	} else {
		va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig, &di->rx_dmah);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
			return FALSE;
		}
		align = (1 << align_bits);
		di->rxd64 = (dma64dd_t *) ROUNDUP((uintptr) va, align);
		di->rxdalign =
		    (uint) ((int8 *) (uintptr) di->rxd64 - (int8 *) va);
		PHYSADDRLOSET(di->rxdpa,
			      PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

		PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
		di->rxdalloc = alloced;
		ASSERT(ISALIGNED((uintptr) di->rxd64, align));
	}

	return TRUE;
}
static bool dma64_txreset(dma_info_t *di)
{
	uint32 status;

	if (di->ntxd == 0)
		return TRUE;

	/* suspend tx DMA first */
	W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status =
		   (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
		 && (status != D64_XS0_XS_STOPPED), 10000);

	W_REG(di->osh, &di->d64txregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	OSL_DELAY(300);

	return status == D64_XS0_XS_DISABLED;
}
static bool dma64_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return TRUE;

	return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
		(R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}
static bool dma64_rxreset(dma_info_t *di)
{
	uint32 status;

	if (di->nrxd == 0)
		return TRUE;

	W_REG(di->osh, &di->d64rxregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK))
		  != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}
static bool dma64_rxenabled(dma_info_t *di)
{
	uint32 rc;

	rc = R_REG(di->osh, &di->d64rxregs->control);
	return (rc != 0xffffffff) && (rc & D64_RC_RE);
}
static bool dma64_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return TRUE;

	if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
		return 0;

	if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
	    D64_XS0_XS_IDLE)
		return 1;

	return 0;
}
/* Useful when sending unframed data. This allows us to get a progress report
 * from the DMA. We return a pointer to the beginning of the DATA buffer of
 * the current descriptor. If DMA is idle, we return NULL.
 */
static void *dma64_getpos(dma_info_t *di, bool direction)
{
	void *va;
	bool idle;
	uint32 cd_offset;

	if (direction == DMA_TX) {
		cd_offset =
		    R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK;
		idle = !NTXDACTIVE(di->txin, di->txout);
		va = di->txp[B2I(cd_offset, dma64dd_t)];
	} else {
		cd_offset =
		    R_REG(di->osh, &di->d64rxregs->status0) & D64_XS0_CD_MASK;
		idle = !NRXDACTIVE(di->rxin, di->rxout);
		va = di->rxp[B2I(cd_offset, dma64dd_t)];
	}

	/* If DMA is IDLE, return NULL */
	if (idle) {
		DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
		va = NULL;
	}

	return va;
}
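/*
 * Usage sketch (illustrative only, not from the original source; "my_di"
 * and "consumed_upto" are hypothetical caller names): a driver streaming
 * unframed data can poll dma64_getpos() for a progress report.
 *
 *	void *pos = dma64_getpos(my_di, DMA_TX);
 *	if (pos == NULL)
 *		;			// engine idle: nothing in flight
 *	else
 *		consumed_upto = pos;	// buffers before this one are done
 */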
/* TX of unframed data
 *
 * Adds a DMA ring descriptor for the data pointed to by "buf".
 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
 * that take a pointer to a "packet".
 * Each call to this results in a single descriptor being added for "len"
 * bytes of data starting at "buf"; it doesn't handle chained buffers.
 */
static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
{
	uint16 txout;
	uint32 flags = 0;
	dmaaddr_t pa;		/* phys addr */

	txout = di->txout;

	/* return nonzero if out of tx descriptors */
	if (NEXTTXD(txout) == di->txin)
		goto outoftxd;

	if (len == 0)
		return 0;

	pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);

	flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);

	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
	ASSERT(di->txp[txout] == NULL);

	/* save the buffer pointer - used by dma_getpos */
	di->txp[txout] = buf;

	/* bump the tx descriptor index */
	txout = NEXTTXD(txout);
	di->txout = txout;

	/* kick the chip */
	if (commit) {
		W_REG(di->osh, &di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));
	}

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
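/*
 * Usage sketch (illustrative only; "my_di", "buf" and "len" are
 * hypothetical): post one raw buffer per call and honor the nonzero
 * return when the ring is full.
 *
 *	if (dma64_txunframed(my_di, buf, len, TRUE) != 0) {
 *		// ring full: txavail is now 0, back off and retry later
 *	}
 */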
/* !! tx entry routine
 * WARNING: call must check the return value for error.
 *   the error (tossed frames) could be fatal and cause many subsequent hard
 *   to debug problems
 */
static int BCMFASTPATH
dma64_txfast(dma_info_t *di, void *p0, bool commit)
{
	void *p, *next;
	unsigned char *data;
	uint len;
	uint16 txout;
	uint32 flags = 0;
	dmaaddr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j;
		hnddma_seg_map_t *map;

		data = PKTDATA(di->osh, p);
		len = PKTLEN(di->osh, p);
#ifdef BCM_DMAPAD
		len += PKTDMAPAD(di->osh, p);
#endif				/* BCM_DMAPAD */
		next = PKTNEXT(di->osh, p);

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		if (DMASGLIST_ENAB)
			bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
			     &di->txp_dmah[txout]);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs >
			    (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
				    1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= D64_CTRL1_SOF;

			/* With a DMA segment list, Descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
			 * is when the end of the segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
			if (txout == (di->ntxd - 1))
				flags |= D64_CTRL1_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
			}

			dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
		     BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(di->osh, &di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
	PKTFREE(di->osh, p0, TRUE);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
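/*
 * Per the WARNING above, callers must check the return value: on failure
 * the packet chain has already been freed here, so touching p0 afterwards
 * would be a use-after-free. Hedged sketch (caller names hypothetical):
 *
 *	if (dma64_txfast(my_di, pkt, TRUE) != 0) {
 *		// pkt was already freed by dma64_txfast; only count it
 *		my_stats.tx_dropped++;
 *	}
 */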
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
static void *BCMFASTPATH
dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
	uint16 start, end, i;
	uint16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transfered")));

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		dma64regs_t *dregs = di->d64txregs;

		end =
		    (uint16) (B2I
			      (((R_REG(di->osh, &dregs->status0) &
				 D64_XS0_CD_MASK) -
				di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc =
			    (uint16) (R_REG(di->osh, &dregs->status1) &
				      D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, dma64dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
			       di->dataoffsetlow));
		PHYSADDRHISET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
			       di->dataoffsethigh));

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size =
			    (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
			     D64_CTRL2_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
			W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return txp;

 bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
	return NULL;
}
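/*
 * Reclaim-loop sketch (illustrative; "my_di" is hypothetical): a typical
 * tx-done handler drains completed descriptors with
 * HNDDMA_RANGE_TRANSMITTED and frees each returned packet.
 *
 *	void *p;
 *	while ((p = dma64_getnexttxp(my_di, HNDDMA_RANGE_TRANSMITTED)))
 *		PKTFREE(my_di->osh, p, TRUE);
 *
 * dma64_txreclaim() above implements exactly this pattern, passing
 * HNDDMA_RANGE_ALL when the ring is being torn down.
 */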
static void *BCMFASTPATH
dma64_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i, curr;
	void *rxp;
	dmaaddr_t pa;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma64_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
		       di->dataoffsetlow));
	PHYSADDRHISET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
		       di->dataoffsethigh));

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return rxp;
}
/* Probe for the DmaExtendedAddrChanges (addrext) capability by setting the
 * AE bit and reading it back; the bit sticks only if the engine supports it.
 */
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs)
{
	uint32 w;

	OR_REG(osh, &dma64regs->control, D64_XC_AE);
	w = R_REG(osh, &dma64regs->control);
	AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void dma64_txrotate(dma_info_t *di)
{
	uint16 ad;
	uint nactive;
	uint rot;
	uint16 old, new;
	uint32 w;
	uint16 first, last;

	ASSERT(dma64_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = (uint16) (B2I
		       ((((R_REG(di->osh, &di->d64txregs->status1) &
			   D64_XS1_AD_MASK)
			  - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
		if (new == (di->ntxd - 1))
			w |= D64_CTRL1_EOT;
		W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
		W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
		W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		/* move the segment map as well */
		if (DMASGLIST_ENAB) {
			bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
			      sizeof(hnddma_seg_map_t));
			bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(di->osh, &di->d64txregs->ptr,
	      di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
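/*
 * Worked example of the rotation arithmetic (illustrative values only):
 * with ntxd = 64, txin = 10 and the hardware ActiveDescriptor at index 14,
 * rot = TXD(14 - 10) = 4, so every posted entry moves forward by 4 slots
 * modulo the ring size and txin/txout advance by the same amount. The EOT
 * bit is recomputed during the copy because only the physically last ring
 * entry (index ntxd - 1) may carry it.
 */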
uint dma_addrwidth(si_t *sih, void *dmaregs)
{
	dma32regs_t *dma32regs;
	osl_t *osh;

	osh = si_osh(sih);

	/* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
	/* DMA engine is 64-bit capable */
	if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
		/* backplane is 64-bit capable */
		if (si_backplane64(sih))
			/* If bus is System Backplane or PCIE then we can access 64-bits */
			if ((BUSTYPE(sih->bustype) == SI_BUS) ||
			    ((BUSTYPE(sih->bustype) == PCI_BUS) &&
			     (sih->buscoretype == PCIE_CORE_ID)))
				return DMADDRWIDTH_64;

		/* DMA64 is always 32-bit capable, AE is always TRUE */
		ASSERT(_dma64_addrext(osh, (dma64regs_t *) dmaregs));

		return DMADDRWIDTH_32;
	}

	/* Start checking for 32-bit / 30-bit addressing */
	dma32regs = (dma32regs_t *) dmaregs;

	/* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
	if ((BUSTYPE(sih->bustype) == SI_BUS) ||
	    ((BUSTYPE(sih->bustype) == PCI_BUS)
	     && sih->buscoretype == PCIE_CORE_ID)
	    || (_dma32_addrext(osh, dma32regs)))
		return DMADDRWIDTH_32;

	/* fall back to 30-bit addressing */
	return DMADDRWIDTH_30;
}
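/*
 * Usage sketch (illustrative; "regs" is a hypothetical register pointer):
 * attach code can use the reported width to choose a host DMA address mask.
 *
 *	switch (dma_addrwidth(sih, regs)) {
 *	case DMADDRWIDTH_64:
 *		// full 64-bit addressing (SI backplane or PCIe host)
 *		break;
 *	case DMADDRWIDTH_32:
 *		// 32-bit addressing, possibly via the addrext mechanism
 *		break;
 *	default:
 *		// DMADDRWIDTH_30: legacy 30-bit constraint
 *		break;
 *	}
 */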