staging: brcm80211: fix "ERROR: do not initialise statics to 0 or NULL"
drivers/staging/brcm80211/util/hnddma.c
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <typedefs.h>
18 #include <bcmdefs.h>
19 #include <bcmdevs.h>
20 #include <osl.h>
21 #include <bcmendian.h>
22 #include <hndsoc.h>
23 #include <bcmutils.h>
24 #include <siutils.h>
25
26 #include <sbhnddma.h>
27 #include <hnddma.h>
28
29 /* debug/trace */
30 #ifdef BCMDBG
31 #define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args
32 #define DMA_TRACE(args) if (!(*di->msg_level & 2)); else printf args
33 #else
34 #define DMA_ERROR(args)
35 #define DMA_TRACE(args)
36 #endif /* BCMDBG */
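/*
 * Annotation (not in the original source): the odd "if (...); else printf args"
 * form above is a deliberate idiom. The macro argument is a fully parenthesized
 * printf argument list, so
 *
 *	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
 *
 * expands to
 *
 *	if (!(*di->msg_level & 1)); else printf ("%s: dma_txfast: out of txds\n", di->name);
 *
 * The expansion plus the caller's trailing semicolon forms one complete
 * if/else statement, so a surrounding unbraced if/else in the caller still
 * pairs up correctly (a plain "if (cond) printf args" macro would steal the
 * caller's else).
 */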
37
38 #define DMA_NONE(args)
39
40 #define d32txregs dregs.d32_u.txregs_32
41 #define d32rxregs dregs.d32_u.rxregs_32
42 #define txd32 dregs.d32_u.txd_32
43 #define rxd32 dregs.d32_u.rxd_32
44
45 #define d64txregs dregs.d64_u.txregs_64
46 #define d64rxregs dregs.d64_u.rxregs_64
47 #define txd64 dregs.d64_u.txd_64
48 #define rxd64 dregs.d64_u.rxd_64
49
50 /* default dma message level (if input msg_level pointer is null in dma_attach()) */
51 static uint dma_msg_level;
52
53 #define MAXNAMEL 8 /* 8 char names */
54
55 #define DI_INFO(dmah) ((dma_info_t *)dmah)
56
57 /* dma engine software state */
58 typedef struct dma_info {
59 struct hnddma_pub hnddma; /* exported structure, don't use hnddma_t,
60 * which could be const
61 */
62 uint *msg_level; /* message level pointer */
63 char name[MAXNAMEL]; /* callers name for diag msgs */
64
65 void *osh; /* os handle */
66 si_t *sih; /* sb handle */
67
68 bool dma64; /* this dma engine is operating in 64-bit mode */
69 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
70
71 union {
72 struct {
73 dma32regs_t *txregs_32; /* 32-bit dma tx engine registers */
74 dma32regs_t *rxregs_32; /* 32-bit dma rx engine registers */
75 dma32dd_t *txd_32; /* pointer to dma32 tx descriptor ring */
76 dma32dd_t *rxd_32; /* pointer to dma32 rx descriptor ring */
77 } d32_u;
78 struct {
79 dma64regs_t *txregs_64; /* 64-bit dma tx engine registers */
80 dma64regs_t *rxregs_64; /* 64-bit dma rx engine registers */
81 dma64dd_t *txd_64; /* pointer to dma64 tx descriptor ring */
82 dma64dd_t *rxd_64; /* pointer to dma64 rx descriptor ring */
83 } d64_u;
84 } dregs;
85
86 uint16 dmadesc_align; /* alignment requirement for dma descriptors */
87
88 uint16 ntxd; /* # tx descriptors tunable */
89 uint16 txin; /* index of next descriptor to reclaim */
90 uint16 txout; /* index of next descriptor to post */
91 void **txp; /* pointer to parallel array of pointers to packets */
92 osldma_t *tx_dmah; /* DMA TX descriptor ring handle */
93 hnddma_seg_map_t *txp_dmah; /* DMA MAP meta-data handle */
94 dmaaddr_t txdpa; /* Aligned physical address of descriptor ring */
95 dmaaddr_t txdpaorig; /* Original physical address of descriptor ring */
96 uint16 txdalign; /* #bytes added to alloc'd mem to align txd */
97 uint32 txdalloc; /* #bytes allocated for the ring */
98 uint32 xmtptrbase; /* When using unaligned descriptors, the ptr register
99 * is not just an index, it needs all 13 bits to be
100 * an offset from the addr register.
101 */
102
103 uint16 nrxd; /* # rx descriptors tunable */
104 uint16 rxin; /* index of next descriptor to reclaim */
105 uint16 rxout; /* index of next descriptor to post */
106 void **rxp; /* pointer to parallel array of pointers to packets */
107 osldma_t *rx_dmah; /* DMA RX descriptor ring handle */
108 hnddma_seg_map_t *rxp_dmah; /* DMA MAP meta-data handle */
109 dmaaddr_t rxdpa; /* Aligned physical address of descriptor ring */
110 dmaaddr_t rxdpaorig; /* Original physical address of descriptor ring */
111 uint16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
112 uint32 rxdalloc; /* #bytes allocated for the ring */
113 uint32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
114
115 /* tunables */
116 uint16 rxbufsize; /* rx buffer size in bytes,
117 * not including the extra headroom
118 */
119 uint rxextrahdrroom; /* extra rx headroom, reserved to assist the upper stack,
120 * e.g. some rx pkt buffers will be bridged to the tx side
121 * without byte copying. The extra headroom needs to be
122 * large enough to fit the tx header.
123 * Some dongle drivers may not need it.
124 */
125 uint nrxpost; /* # rx buffers to keep posted */
126 uint rxoffset; /* rxcontrol offset */
127 uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
128 uint ddoffsethigh; /* high 32 bits */
129 uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
130 uint dataoffsethigh; /* high 32 bits */
131 bool aligndesc_4k; /* whether the descriptor base needs to be aligned (4K/8K) */
132 } dma_info_t;
133
134 /*
135 * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines.
136 * Otherwise it will support only 64-bit.
137 *
138 * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines.
139 * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines.
140 *
141 * DMA64_MODE indicates whether the current DMA engine is running as 64-bit.
142 */
143 #ifdef BCMDMA32
144 #define DMA32_ENAB(di) 1
145 #define DMA64_ENAB(di) 1
146 #define DMA64_MODE(di) ((di)->dma64)
147 #else /* !BCMDMA32 */
148 #define DMA32_ENAB(di) 0
149 #define DMA64_ENAB(di) 1
150 #define DMA64_MODE(di) 1
151 #endif /* !BCMDMA32 */
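/*
 * Annotation (illustrative, not in the original source): with BCMDMA32
 * undefined, DMA32_ENAB(di) is the constant 0 and DMA64_MODE(di) is 1, so a
 * dispatch such as
 *
 *	if (DMA64_ENAB(di) && DMA64_MODE(di))
 *		dma64_txinit(di);
 *	else if (DMA32_ENAB(di))
 *		dma32_txinit(di);
 *
 * reduces at compile time to the unconditional dma64 call and the dma32
 * branch is eliminated as dead code. The run-time di->dma64 flag only
 * matters when both engines are compiled in (BCMDMA32 defined).
 */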
152
153 /* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
154 #ifdef BCMDMASGLISTOSL
155 #define DMASGLIST_ENAB TRUE
156 #else
157 #define DMASGLIST_ENAB FALSE
158 #endif /* BCMDMASGLISTOSL */
159
160 /* descriptor bumping macros */
161 #define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
162 #define TXD(x) XXD((x), di->ntxd)
163 #define RXD(x) XXD((x), di->nrxd)
164 #define NEXTTXD(i) TXD((i) + 1)
165 #define PREVTXD(i) TXD((i) - 1)
166 #define NEXTRXD(i) RXD((i) + 1)
167 #define PREVRXD(i) RXD((i) - 1)
168
169 #define NTXDACTIVE(h, t) TXD((t) - (h))
170 #define NRXDACTIVE(h, t) RXD((t) - (h))
171
172 /* macros to convert between byte offsets and indexes */
173 #define B2I(bytes, type) ((bytes) / sizeof(type))
174 #define I2B(index, type) ((index) * sizeof(type))
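/*
 * Annotation (worked example, not in the original source): the ring macros
 * rely on ntxd/nrxd being powers of 2, so "& (n - 1)" is an exact modulo.
 * With di->ntxd == 64:
 *
 *	NEXTTXD(63)       == XXD(64, 64)      == 0   (wraps to the ring start)
 *	PREVTXD(0)        == XXD(-1, 64)      == 63
 *	NTXDACTIVE(60, 3) == XXD(3 - 60, 64)  == 7   descriptors in flight
 *
 * B2I/I2B convert between a byte offset into the descriptor table (what the
 * hardware status registers report) and a descriptor index, e.g. with
 * sizeof(dma64dd_t) == 16, I2B(3, dma64dd_t) == 48 and B2I(48, dma64dd_t) == 3.
 */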
175
176 #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
177 #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
178
179 #define PCI64ADDR_HIGH 0x80000000 /* address[63] */
180 #define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
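/*
 * Annotation (worked example, not in the original source): PCI32ADDR_HIGH
 * masks the top two bits of a 32-bit bus address. When address extension is
 * used (see dma32_dd_upd/dma64_dd_upd below), those bits are moved into the
 * descriptor's AE field and cleared from the address. For
 * PHYSADDRLO(pa) == 0xC0123456:
 *
 *	ae = (0xC0123456 & 0xc0000000) >> 30;	   ae becomes 0x3
 *	PHYSADDRLO(pa) &= ~0xc0000000;		   address becomes 0x00123456
 *
 * so the hardware sees the low 30 bits in the address field and the top two
 * bits in the AE control bits.
 */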
181
182 /* Common prototypes */
183 static bool _dma_isaddrext(dma_info_t *di);
184 static bool _dma_descriptor_align(dma_info_t *di);
185 static bool _dma_alloc(dma_info_t *di, uint direction);
186 static void _dma_detach(dma_info_t *di);
187 static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
188 static void _dma_rxinit(dma_info_t *di);
189 static void *_dma_rx(dma_info_t *di);
190 static bool _dma_rxfill(dma_info_t *di);
191 static void _dma_rxreclaim(dma_info_t *di);
192 static void _dma_rxenable(dma_info_t *di);
193 static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
194 static void _dma_rx_param_get(dma_info_t *di, uint16 *rxoffset,
195 uint16 *rxbufsize);
196
197 static void _dma_txblock(dma_info_t *di);
198 static void _dma_txunblock(dma_info_t *di);
199 static uint _dma_txactive(dma_info_t *di);
200 static uint _dma_rxactive(dma_info_t *di);
201 static uint _dma_txpending(dma_info_t *di);
202 static uint _dma_txcommitted(dma_info_t *di);
203
204 static void *_dma_peeknexttxp(dma_info_t *di);
205 static void *_dma_peeknextrxp(dma_info_t *di);
206 static uintptr _dma_getvar(dma_info_t *di, const char *name);
207 static void _dma_counterreset(dma_info_t *di);
208 static void _dma_fifoloopbackenable(dma_info_t *di);
209 static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
210 static uint8 dma_align_sizetobits(uint size);
211 static void *dma_ringalloc(osl_t *osh, uint32 boundary, uint size,
212 uint16 *alignbits, uint *alloced,
213 dmaaddr_t *descpa, osldma_t **dmah);
214
215 /* Prototypes for 32-bit routines */
216 static bool dma32_alloc(dma_info_t *di, uint direction);
217 static bool dma32_txreset(dma_info_t *di);
218 static bool dma32_rxreset(dma_info_t *di);
219 static bool dma32_txsuspendedidle(dma_info_t *di);
220 static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
221 static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
222 static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
223 static void dma32_txrotate(dma_info_t *di);
224 static bool dma32_rxidle(dma_info_t *di);
225 static void dma32_txinit(dma_info_t *di);
226 static bool dma32_txenabled(dma_info_t *di);
227 static void dma32_txsuspend(dma_info_t *di);
228 static void dma32_txresume(dma_info_t *di);
229 static bool dma32_txsuspended(dma_info_t *di);
230 static void dma32_txreclaim(dma_info_t *di, txd_range_t range);
231 static bool dma32_txstopped(dma_info_t *di);
232 static bool dma32_rxstopped(dma_info_t *di);
233 static bool dma32_rxenabled(dma_info_t *di);
234
235 static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
236
237 /* Prototypes for 64-bit routines */
238 static bool dma64_alloc(dma_info_t *di, uint direction);
239 static bool dma64_txreset(dma_info_t *di);
240 static bool dma64_rxreset(dma_info_t *di);
241 static bool dma64_txsuspendedidle(dma_info_t *di);
242 static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
243 static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
244 static void *dma64_getpos(dma_info_t *di, bool direction);
245 static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
246 static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
247 static void dma64_txrotate(dma_info_t *di);
248
249 static bool dma64_rxidle(dma_info_t *di);
250 static void dma64_txinit(dma_info_t *di);
251 static bool dma64_txenabled(dma_info_t *di);
252 static void dma64_txsuspend(dma_info_t *di);
253 static void dma64_txresume(dma_info_t *di);
254 static bool dma64_txsuspended(dma_info_t *di);
255 static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
256 static bool dma64_txstopped(dma_info_t *di);
257 static bool dma64_rxstopped(dma_info_t *di);
258 static bool dma64_rxenabled(dma_info_t *di);
259 static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);
260
261 STATIC INLINE uint32 parity32(uint32 data);
262
263 const di_fcn_t dma64proc = {
264 (di_detach_t) _dma_detach,
265 (di_txinit_t) dma64_txinit,
266 (di_txreset_t) dma64_txreset,
267 (di_txenabled_t) dma64_txenabled,
268 (di_txsuspend_t) dma64_txsuspend,
269 (di_txresume_t) dma64_txresume,
270 (di_txsuspended_t) dma64_txsuspended,
271 (di_txsuspendedidle_t) dma64_txsuspendedidle,
272 (di_txfast_t) dma64_txfast,
273 (di_txunframed_t) dma64_txunframed,
274 (di_getpos_t) dma64_getpos,
275 (di_txstopped_t) dma64_txstopped,
276 (di_txreclaim_t) dma64_txreclaim,
277 (di_getnexttxp_t) dma64_getnexttxp,
278 (di_peeknexttxp_t) _dma_peeknexttxp,
279 (di_txblock_t) _dma_txblock,
280 (di_txunblock_t) _dma_txunblock,
281 (di_txactive_t) _dma_txactive,
282 (di_txrotate_t) dma64_txrotate,
283
284 (di_rxinit_t) _dma_rxinit,
285 (di_rxreset_t) dma64_rxreset,
286 (di_rxidle_t) dma64_rxidle,
287 (di_rxstopped_t) dma64_rxstopped,
288 (di_rxenable_t) _dma_rxenable,
289 (di_rxenabled_t) dma64_rxenabled,
290 (di_rx_t) _dma_rx,
291 (di_rxfill_t) _dma_rxfill,
292 (di_rxreclaim_t) _dma_rxreclaim,
293 (di_getnextrxp_t) _dma_getnextrxp,
294 (di_peeknextrxp_t) _dma_peeknextrxp,
295 (di_rxparam_get_t) _dma_rx_param_get,
296
297 (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
298 (di_getvar_t) _dma_getvar,
299 (di_counterreset_t) _dma_counterreset,
300 (di_ctrlflags_t) _dma_ctrlflags,
301 NULL,
302 NULL,
303 NULL,
304 (di_rxactive_t) _dma_rxactive,
305 (di_txpending_t) _dma_txpending,
306 (di_txcommitted_t) _dma_txcommitted,
307 39
308 };
309
310 static const di_fcn_t dma32proc = {
311 (di_detach_t) _dma_detach,
312 (di_txinit_t) dma32_txinit,
313 (di_txreset_t) dma32_txreset,
314 (di_txenabled_t) dma32_txenabled,
315 (di_txsuspend_t) dma32_txsuspend,
316 (di_txresume_t) dma32_txresume,
317 (di_txsuspended_t) dma32_txsuspended,
318 (di_txsuspendedidle_t) dma32_txsuspendedidle,
319 (di_txfast_t) dma32_txfast,
320 NULL,
321 NULL,
322 (di_txstopped_t) dma32_txstopped,
323 (di_txreclaim_t) dma32_txreclaim,
324 (di_getnexttxp_t) dma32_getnexttxp,
325 (di_peeknexttxp_t) _dma_peeknexttxp,
326 (di_txblock_t) _dma_txblock,
327 (di_txunblock_t) _dma_txunblock,
328 (di_txactive_t) _dma_txactive,
329 (di_txrotate_t) dma32_txrotate,
330
331 (di_rxinit_t) _dma_rxinit,
332 (di_rxreset_t) dma32_rxreset,
333 (di_rxidle_t) dma32_rxidle,
334 (di_rxstopped_t) dma32_rxstopped,
335 (di_rxenable_t) _dma_rxenable,
336 (di_rxenabled_t) dma32_rxenabled,
337 (di_rx_t) _dma_rx,
338 (di_rxfill_t) _dma_rxfill,
339 (di_rxreclaim_t) _dma_rxreclaim,
340 (di_getnextrxp_t) _dma_getnextrxp,
341 (di_peeknextrxp_t) _dma_peeknextrxp,
342 (di_rxparam_get_t) _dma_rx_param_get,
343
344 (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
345 (di_getvar_t) _dma_getvar,
346 (di_counterreset_t) _dma_counterreset,
347 (di_ctrlflags_t) _dma_ctrlflags,
348 NULL,
349 NULL,
350 NULL,
351 (di_rxactive_t) _dma_rxactive,
352 (di_txpending_t) _dma_txpending,
353 (di_txcommitted_t) _dma_txcommitted,
354 39
355 };
356
357 hnddma_t *dma_attach(osl_t *osh, char *name, si_t *sih, void *dmaregstx,
358 void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
359 int rxextheadroom, uint nrxpost, uint rxoffset,
360 uint *msg_level)
361 {
362 dma_info_t *di;
363 uint size;
364
365 /* allocate private info structure */
366 di = MALLOC(osh, sizeof(dma_info_t));
367 if (di == NULL) {
368 #ifdef BCMDBG
369 printf("dma_attach: out of memory, malloced %d bytes\n",
370 MALLOCED(osh));
371 #endif
372 return NULL;
373 }
374
375 bzero((char *)di, sizeof(dma_info_t));
376
377 di->msg_level = msg_level ? msg_level : &dma_msg_level;
378
379 /* old chips w/o sb are no longer supported */
380 ASSERT(sih != NULL);
381
382 if (DMA64_ENAB(di))
383 di->dma64 =
384 ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
385 else
386 di->dma64 = 0;
387
388 /* check arguments */
389 ASSERT(ISPOWEROF2(ntxd));
390 ASSERT(ISPOWEROF2(nrxd));
391
392 if (nrxd == 0)
393 ASSERT(dmaregsrx == NULL);
394 if (ntxd == 0)
395 ASSERT(dmaregstx == NULL);
396
397 /* init dma reg pointer */
398 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
399 ASSERT(ntxd <= D64MAXDD);
400 ASSERT(nrxd <= D64MAXDD);
401 di->d64txregs = (dma64regs_t *) dmaregstx;
402 di->d64rxregs = (dma64regs_t *) dmaregsrx;
403 di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
404 } else if (DMA32_ENAB(di)) {
405 ASSERT(ntxd <= D32MAXDD);
406 ASSERT(nrxd <= D32MAXDD);
407 di->d32txregs = (dma32regs_t *) dmaregstx;
408 di->d32rxregs = (dma32regs_t *) dmaregsrx;
409 di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
410 } else {
411 DMA_ERROR(("dma_attach: driver doesn't support 32-bit DMA\n"));
412 ASSERT(0);
413 goto fail;
414 }
415
416 /* Default flags (which can be changed by the driver calling dma_ctrlflags
417 * before enable): for backwards compatibility both Rx Overflow Continue
418 * and Parity are DISABLED, even when the hardware
419 * supports them.
420 */
421 di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
422 0);
423
424 DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d " "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", name, (DMA64_MODE(di) ? "DMA64" : "DMA32"), osh, di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
425
426 /* make a private copy of our caller's name */
427 strncpy(di->name, name, MAXNAMEL);
428 di->name[MAXNAMEL - 1] = '\0';
429
430 di->osh = osh;
431 di->sih = sih;
432
433 /* save tunables */
434 di->ntxd = (uint16) ntxd;
435 di->nrxd = (uint16) nrxd;
436
437 /* the actual dma size doesn't include the extra headroom */
438 di->rxextrahdrroom =
439 (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
440 if (rxbufsize > BCMEXTRAHDROOM)
441 di->rxbufsize = (uint16) (rxbufsize - di->rxextrahdrroom);
442 else
443 di->rxbufsize = (uint16) rxbufsize;
444
445 di->nrxpost = (uint16) nrxpost;
446 di->rxoffset = (uint8) rxoffset;
447
448 /*
449 * figure out the DMA physical address offset for dd and data
450 * PCI/PCIE: they map the silicon backplane address to zero-based memory, need offset
451 * Other buses: use zero
452 * SI_BUS BIGENDIAN kludge: use the sdram swapped region for data buffers, not descriptors
453 */
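/*
 * Annotation (illustrative, not in the original source): on a PCI bus the
 * backplane is not visible at bus address 0, so every descriptor and data
 * address handed to the engine has an offset added. For example, with
 * di->ddoffsetlow == SI_PCI_DMA, a descriptor ring at physical address
 * 0x00200000 is programmed into the engine as (SI_PCI_DMA + 0x00200000) by
 * _dma_ddtable_init(); on other buses both offsets stay 0 and the addition
 * is a no-op.
 */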
454 di->ddoffsetlow = 0;
455 di->dataoffsetlow = 0;
456 /* for pci bus, add offset */
457 if (sih->bustype == PCI_BUS) {
458 if ((sih->buscoretype == PCIE_CORE_ID) && DMA64_MODE(di)) {
459 /* pcie with DMA64 */
460 di->ddoffsetlow = 0;
461 di->ddoffsethigh = SI_PCIE_DMA_H32;
462 } else {
463 /* pci(DMA32/DMA64) or pcie with DMA32 */
464 di->ddoffsetlow = SI_PCI_DMA;
465 di->ddoffsethigh = 0;
466 }
467 di->dataoffsetlow = di->ddoffsetlow;
468 di->dataoffsethigh = di->ddoffsethigh;
469 }
470 #if defined(__mips__) && defined(IL_BIGENDIAN)
471 di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
472 #endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
473 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
474 if ((si_coreid(sih) == SDIOD_CORE_ID)
475 && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
476 di->addrext = 0;
477 else if ((si_coreid(sih) == I2S_CORE_ID) &&
478 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
479 di->addrext = 0;
480 else
481 di->addrext = _dma_isaddrext(di);
482
483 /* do the descriptors need to be aligned, and if so, on a 4K/8K boundary or not */
484 di->aligndesc_4k = _dma_descriptor_align(di);
485 if (di->aligndesc_4k) {
486 if (DMA64_MODE(di)) {
487 di->dmadesc_align = D64RINGALIGN_BITS;
488 if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
489 /* for a smaller dd table, the HW relaxes the alignment requirement */
490 di->dmadesc_align = D64RINGALIGN_BITS - 1;
491 }
492 } else
493 di->dmadesc_align = D32RINGALIGN_BITS;
494 } else
495 di->dmadesc_align = 4; /* 16 byte alignment */
496
497 DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
498 di->aligndesc_4k, di->dmadesc_align));
499
500 /* allocate tx packet pointer vector */
501 if (ntxd) {
502 size = ntxd * sizeof(void *);
503 di->txp = MALLOC(osh, size);
504 if (di->txp == NULL) {
505 DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
506 goto fail;
507 }
508 bzero((char *)di->txp, size);
509 }
510
511 /* allocate rx packet pointer vector */
512 if (nrxd) {
513 size = nrxd * sizeof(void *);
514 di->rxp = MALLOC(osh, size);
515 if (di->rxp == NULL) {
516 DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
517 goto fail;
518 }
519 bzero((char *)di->rxp, size);
520 }
521
522 /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
523 if (ntxd) {
524 if (!_dma_alloc(di, DMA_TX))
525 goto fail;
526 }
527
528 /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
529 if (nrxd) {
530 if (!_dma_alloc(di, DMA_RX))
531 goto fail;
532 }
533
534 if ((di->ddoffsetlow != 0) && !di->addrext) {
535 if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
536 DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (uint32) PHYSADDRLO(di->txdpa)));
537 goto fail;
538 }
539 if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
540 DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (uint32) PHYSADDRLO(di->rxdpa)));
541 goto fail;
542 }
543 }
544
545 DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));
546
547 /* allocate DMA mapping vectors */
548 if (DMASGLIST_ENAB) {
549 if (ntxd) {
550 size = ntxd * sizeof(hnddma_seg_map_t);
551 di->txp_dmah = (hnddma_seg_map_t *) MALLOC(osh, size);
552 if (di->txp_dmah == NULL)
553 goto fail;
554 bzero((char *)di->txp_dmah, size);
555 }
556
557 if (nrxd) {
558 size = nrxd * sizeof(hnddma_seg_map_t);
559 di->rxp_dmah = (hnddma_seg_map_t *) MALLOC(osh, size);
560 if (di->rxp_dmah == NULL)
561 goto fail;
562 bzero((char *)di->rxp_dmah, size);
563 }
564 }
565
566 return (hnddma_t *) di;
567
568 fail:
569 _dma_detach(di);
570 return NULL;
571 }
572
573 /* init the tx or rx descriptor */
574 static INLINE void
575 dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx,
576 uint32 *flags, uint32 bufcount)
577 {
578 /* dma32 uses 32-bit control to fit both flags and bufcounter */
579 *flags = *flags | (bufcount & CTRL_BC_MASK);
580
581 if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
582 W_SM(&ddring[outidx].addr,
583 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
584 W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
585 } else {
586 /* address extension */
587 uint32 ae;
588 ASSERT(di->addrext);
589 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
590 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
591
592 *flags |= (ae << CTRL_AE_SHIFT);
593 W_SM(&ddring[outidx].addr,
594 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
595 W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
596 }
597 }
598
599 /* Check for odd number of 1's */
600 STATIC INLINE uint32 parity32(uint32 data)
601 {
602 data ^= data >> 16;
603 data ^= data >> 8;
604 data ^= data >> 4;
605 data ^= data >> 2;
606 data ^= data >> 1;
607
608 return data & 1;
609 }
610
611 #define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
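/*
 * Annotation (worked example, not in the original source): parity32() folds
 * the word onto itself so bit 0 ends up as the XOR of all 32 bits. For
 * data == 0x6 (two bits set):
 *
 *	data ^= data >> 16;	data is 0x6
 *	data ^= data >> 8;	data is 0x6
 *	data ^= data >> 4;	data is 0x6
 *	data ^= data >> 2;	data is 0x7
 *	data ^= data >> 1;	data is 0x4
 *	return data & 1;	0, i.e. an even number of 1 bits
 *
 * DMA64_DD_PARITY() applies this to the XOR of the four descriptor words;
 * dma64_dd_upd() sets D64_CTRL2_PARITY when the result is 1, so the
 * descriptor as written always carries an even number of 1 bits.
 */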
612
613 static INLINE void
614 dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
615 uint32 *flags, uint32 bufcount)
616 {
617 uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
618
619 /* PCI bus with big(>1G) physical address, use address extension */
620 #if defined(__mips__) && defined(IL_BIGENDIAN)
621 if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
622 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
623 #else
624 if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
625 #endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
626 ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);
627
628 W_SM(&ddring[outidx].addrlow,
629 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
630 W_SM(&ddring[outidx].addrhigh,
631 BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
632 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
633 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
634 } else {
635 /* address extension for 32-bit PCI */
636 uint32 ae;
637 ASSERT(di->addrext);
638
639 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
640 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
641 ASSERT(PHYSADDRHI(pa) == 0);
642
643 ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
644 W_SM(&ddring[outidx].addrlow,
645 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
646 W_SM(&ddring[outidx].addrhigh,
647 BUS_SWAP32(0 + di->dataoffsethigh));
648 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
649 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
650 }
651 if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
652 if (DMA64_DD_PARITY(&ddring[outidx])) {
653 W_SM(&ddring[outidx].ctrl2,
654 BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
655 }
656 }
657 }
658
659 static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
660 {
661 uint32 w;
662
663 OR_REG(osh, &dma32regs->control, XC_AE);
664 w = R_REG(osh, &dma32regs->control);
665 AND_REG(osh, &dma32regs->control, ~XC_AE);
666 return (w & XC_AE) == XC_AE;
667 }
668
669 static bool _dma_alloc(dma_info_t *di, uint direction)
670 {
671 if (DMA64_ENAB(di) && DMA64_MODE(di))
672 return dma64_alloc(di, direction);
673 else if (DMA32_ENAB(di))
674 return dma32_alloc(di, direction);
675 ASSERT(0);
676 return FALSE; /* neither DMA engine compiled in */
677 }
678
679 /* !! may be called with core in reset */
680 static void _dma_detach(dma_info_t *di)
681 {
682
683 DMA_TRACE(("%s: dma_detach\n", di->name));
684
685 /* shouldn't be here if descriptors are unreclaimed */
686 ASSERT(di->txin == di->txout);
687 ASSERT(di->rxin == di->rxout);
688
689 /* free dma descriptor rings */
690 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
691 if (di->txd64)
692 DMA_FREE_CONSISTENT(di->osh,
693 ((int8 *) (uintptr) di->txd64 -
694 di->txdalign), di->txdalloc,
695 (di->txdpaorig), &di->tx_dmah);
696 if (di->rxd64)
697 DMA_FREE_CONSISTENT(di->osh,
698 ((int8 *) (uintptr) di->rxd64 -
699 di->rxdalign), di->rxdalloc,
700 (di->rxdpaorig), &di->rx_dmah);
701 } else if (DMA32_ENAB(di)) {
702 if (di->txd32)
703 DMA_FREE_CONSISTENT(di->osh,
704 ((int8 *) (uintptr) di->txd32 -
705 di->txdalign), di->txdalloc,
706 (di->txdpaorig), &di->tx_dmah);
707 if (di->rxd32)
708 DMA_FREE_CONSISTENT(di->osh,
709 ((int8 *) (uintptr) di->rxd32 -
710 di->rxdalign), di->rxdalloc,
711 (di->rxdpaorig), &di->rx_dmah);
712 } else
713 ASSERT(0);
714
715 /* free packet pointer vectors */
716 if (di->txp)
717 MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
718 if (di->rxp)
719 MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));
720
721 /* free tx packet DMA handles */
722 if (di->txp_dmah)
723 MFREE(di->osh, (void *)di->txp_dmah,
724 di->ntxd * sizeof(hnddma_seg_map_t));
725
726 /* free rx packet DMA handles */
727 if (di->rxp_dmah)
728 MFREE(di->osh, (void *)di->rxp_dmah,
729 di->nrxd * sizeof(hnddma_seg_map_t));
730
731 /* free our private info structure */
732 MFREE(di->osh, (void *)di, sizeof(dma_info_t));
733
734 }
735
736 static bool _dma_descriptor_align(dma_info_t *di)
737 {
738 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
739 uint32 addrl;
740
741 /* Check to see if the descriptors need to be aligned on 4K/8K or not */
742 if (di->d64txregs != NULL) {
743 W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
744 addrl = R_REG(di->osh, &di->d64txregs->addrlow);
745 if (addrl != 0)
746 return FALSE;
747 } else if (di->d64rxregs != NULL) {
748 W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
749 addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
750 if (addrl != 0)
751 return FALSE;
752 }
753 }
754 return TRUE;
755 }
756
757 /* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
758 static bool _dma_isaddrext(dma_info_t *di)
759 {
760 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
761 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
762
763 /* not all tx or rx channels are available */
764 if (di->d64txregs != NULL) {
765 if (!_dma64_addrext(di->osh, di->d64txregs)) {
766 DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di->name));
767 ASSERT(0);
768 }
769 return TRUE;
770 } else if (di->d64rxregs != NULL) {
771 if (!_dma64_addrext(di->osh, di->d64rxregs)) {
772 DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di->name));
773 ASSERT(0);
774 }
775 return TRUE;
776 }
777 return FALSE;
778 } else if (DMA32_ENAB(di)) {
779 if (di->d32txregs)
780 return _dma32_addrext(di->osh, di->d32txregs);
781 else if (di->d32rxregs)
782 return _dma32_addrext(di->osh, di->d32rxregs);
783 } else
784 ASSERT(0);
785
786 return FALSE;
787 }
788
789 /* initialize descriptor table base address */
790 static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
791 {
792 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
793 if (!di->aligndesc_4k) {
794 if (direction == DMA_TX)
795 di->xmtptrbase = PHYSADDRLO(pa);
796 else
797 di->rcvptrbase = PHYSADDRLO(pa);
798 }
799
800 if ((di->ddoffsetlow == 0)
801 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
802 if (direction == DMA_TX) {
803 W_REG(di->osh, &di->d64txregs->addrlow,
804 (PHYSADDRLO(pa) + di->ddoffsetlow));
805 W_REG(di->osh, &di->d64txregs->addrhigh,
806 (PHYSADDRHI(pa) + di->ddoffsethigh));
807 } else {
808 W_REG(di->osh, &di->d64rxregs->addrlow,
809 (PHYSADDRLO(pa) + di->ddoffsetlow));
810 W_REG(di->osh, &di->d64rxregs->addrhigh,
811 (PHYSADDRHI(pa) + di->ddoffsethigh));
812 }
813 } else {
814 /* DMA64 32bits address extension */
815 uint32 ae;
816 ASSERT(di->addrext);
817 ASSERT(PHYSADDRHI(pa) == 0);
818
819 /* shift the high bit(s) from pa to ae */
820 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
821 PCI32ADDR_HIGH_SHIFT;
822 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
823
824 if (direction == DMA_TX) {
825 W_REG(di->osh, &di->d64txregs->addrlow,
826 (PHYSADDRLO(pa) + di->ddoffsetlow));
827 W_REG(di->osh, &di->d64txregs->addrhigh,
828 di->ddoffsethigh);
829 SET_REG(di->osh, &di->d64txregs->control,
830 D64_XC_AE, (ae << D64_XC_AE_SHIFT));
831 } else {
832 W_REG(di->osh, &di->d64rxregs->addrlow,
833 (PHYSADDRLO(pa) + di->ddoffsetlow));
834 W_REG(di->osh, &di->d64rxregs->addrhigh,
835 di->ddoffsethigh);
836 SET_REG(di->osh, &di->d64rxregs->control,
837 D64_RC_AE, (ae << D64_RC_AE_SHIFT));
838 }
839 }
840
841 } else if (DMA32_ENAB(di)) {
842 ASSERT(PHYSADDRHI(pa) == 0);
843 if ((di->ddoffsetlow == 0)
844 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
845 if (direction == DMA_TX)
846 W_REG(di->osh, &di->d32txregs->addr,
847 (PHYSADDRLO(pa) + di->ddoffsetlow));
848 else
849 W_REG(di->osh, &di->d32rxregs->addr,
850 (PHYSADDRLO(pa) + di->ddoffsetlow));
851 } else {
852 /* dma32 address extension */
853 uint32 ae;
854 ASSERT(di->addrext);
855
856 /* shift the high bit(s) from pa to ae */
857 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
858 PCI32ADDR_HIGH_SHIFT;
859 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
860
861 if (direction == DMA_TX) {
862 W_REG(di->osh, &di->d32txregs->addr,
863 (PHYSADDRLO(pa) + di->ddoffsetlow));
864 SET_REG(di->osh, &di->d32txregs->control, XC_AE,
865 ae << XC_AE_SHIFT);
866 } else {
867 W_REG(di->osh, &di->d32rxregs->addr,
868 (PHYSADDRLO(pa) + di->ddoffsetlow));
869 SET_REG(di->osh, &di->d32rxregs->control, RC_AE,
870 ae << RC_AE_SHIFT);
871 }
872 }
873 } else
874 ASSERT(0);
875 }
876
877 static void _dma_fifoloopbackenable(dma_info_t *di)
878 {
879 DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
880
881 if (DMA64_ENAB(di) && DMA64_MODE(di))
882 OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
883 else if (DMA32_ENAB(di))
884 OR_REG(di->osh, &di->d32txregs->control, XC_LE);
885 else
886 ASSERT(0);
887 }
888
889 static void _dma_rxinit(dma_info_t *di)
890 {
891 DMA_TRACE(("%s: dma_rxinit\n", di->name));
892
893 if (di->nrxd == 0)
894 return;
895
896 di->rxin = di->rxout = 0;
897
898 /* clear rx descriptor ring */
899 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
900 BZERO_SM((void *)(uintptr) di->rxd64,
901 (di->nrxd * sizeof(dma64dd_t)));
902
903 /* a DMA engine without an alignment requirement requires the table to be
904 * initialized before enabling the engine
905 */
906 if (!di->aligndesc_4k)
907 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
908
909 _dma_rxenable(di);
910
911 if (di->aligndesc_4k)
912 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
913 } else if (DMA32_ENAB(di)) {
914 BZERO_SM((void *)(uintptr) di->rxd32,
915 (di->nrxd * sizeof(dma32dd_t)));
916 _dma_rxenable(di);
917 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
918 } else
919 ASSERT(0);
920 }
921
922 static void _dma_rxenable(dma_info_t *di)
923 {
924 uint dmactrlflags = di->hnddma.dmactrlflags;
925
926 DMA_TRACE(("%s: dma_rxenable\n", di->name));
927
928 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
929 uint32 control =
930 (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) |
931 D64_RC_RE;
932
933 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
934 control |= D64_RC_PD;
935
936 if (dmactrlflags & DMA_CTRL_ROC)
937 control |= D64_RC_OC;
938
939 W_REG(di->osh, &di->d64rxregs->control,
940 ((di->rxoffset << D64_RC_RO_SHIFT) | control));
941 } else if (DMA32_ENAB(di)) {
942 uint32 control =
943 (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;
944
945 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
946 control |= RC_PD;
947
948 if (dmactrlflags & DMA_CTRL_ROC)
949 control |= RC_OC;
950
951 W_REG(di->osh, &di->d32rxregs->control,
952 ((di->rxoffset << RC_RO_SHIFT) | control));
953 } else
954 ASSERT(0);
955 }
956
957 static void
958 _dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize)
959 {
960 /* the normal values fit into 16 bits */
961 *rxoffset = (uint16) di->rxoffset;
962 *rxbufsize = (uint16) di->rxbufsize;
963 }
964
965 /* !! rx entry routine
966 * returns a pointer to the next frame received, or NULL if there are no more.
967 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is supported
968 * as a packet chain;
969 * otherwise, the frame is treated as a giant packet and is tossed.
970 * The DMA scattering starts with a normal DMA header, followed by the first buffer's data.
971 * After the maximum buffer size is reached, the data continues in the next DMA descriptor
972 * buffer WITHOUT a DMA header
973 */
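/*
 * Annotation (worked example, not in the original source): the length in the
 * first buffer's DMA header covers the whole frame. With rxbufsize == 2048,
 * rxoffset == 30 and a received len == 4000, the code below computes
 *
 *	pkt_len = MIN(30 + 4000, 2048)  = 2048	first buffer is full
 *	resid   = 4000 - (2048 - 30)    = 1982	bytes still in later buffers
 *
 * so one more 2048-byte buffer is chained (and PKTSETLEN'd to 1982). If
 * DMA_CTRL_RXMULTI is not set, the whole chain is freed and counted in
 * rxgiants instead.
 */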
974 static void *BCMFASTPATH _dma_rx(dma_info_t *di)
975 {
976 void *p, *head, *tail;
977 uint len;
978 uint pkt_len;
979 int resid = 0;
980
981 next_frame:
982 head = _dma_getnextrxp(di, FALSE);
983 if (head == NULL)
984 return NULL;
985
986 len = ltoh16(*(uint16 *) (PKTDATA(head)));
987 DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
988
989 #if defined(__mips__)
990 if (!len) {
991 while (!(len = *(uint16 *) OSL_UNCACHED(PKTDATA(head))))
992 OSL_DELAY(1);
993
994 *(uint16 *) PKTDATA(head) = htol16((uint16) len);
995 }
996 #endif /* defined(__mips__) */
997
998 /* set actual length */
999 pkt_len = MIN((di->rxoffset + len), di->rxbufsize);
1000 PKTSETLEN(head, pkt_len);
1001 resid = len - (di->rxbufsize - di->rxoffset);
1002
1003 /* check for single or multi-buffer rx */
1004 if (resid > 0) {
1005 tail = head;
1006 while ((resid > 0) && (p = _dma_getnextrxp(di, FALSE))) {
1007 PKTSETNEXT(tail, p);
1008 pkt_len = MIN(resid, (int)di->rxbufsize);
1009 PKTSETLEN(p, pkt_len);
1010
1011 tail = p;
1012 resid -= di->rxbufsize;
1013 }
1014
1015 #ifdef BCMDBG
1016 if (resid > 0) {
1017 uint cur;
1018 ASSERT(p == NULL);
1019 cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
1020 B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
1021 D64_RS0_CD_MASK) -
1022 di->rcvptrbase) & D64_RS0_CD_MASK,
1023 dma64dd_t) : B2I(R_REG(di->osh,
1024 &di->d32rxregs->
1025 status) & RS_CD_MASK,
1026 dma32dd_t);
1027 DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
1028 di->rxin, di->rxout, cur));
1029 }
1030 #endif /* BCMDBG */
1031
1032 if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
1033 DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
1034 di->name, len));
1035 PKTFREE(di->osh, head, FALSE);
1036 di->hnddma.rxgiants++;
1037 goto next_frame;
1038 }
1039 }
1040
1041 return head;
1042 }
1043
1044 /* post receive buffers
1045 * returns TRUE if the refill failed completely and the ring is empty;
1046 * this will stall the rx dma and the caller might want to call rxfill again asap.
1047 * This is unlikely on a memory-rich NIC, but common on a memory-constrained dongle
1048 */
1049 static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
1050 {
1051 void *p;
1052 uint16 rxin, rxout;
1053 uint32 flags = 0;
1054 uint n;
1055 uint i;
1056 dmaaddr_t pa;
1057 uint extra_offset = 0;
1058 bool ring_empty;
1059
1060 ring_empty = FALSE;
1061
1062 /*
1063 * Determine how many receive buffers we're lacking
1064 * from the full complement, allocate, initialize,
1065 * and post them, then update the chip rx lastdscr.
1066 */
1067
1068 rxin = di->rxin;
1069 rxout = di->rxout;
1070
1071 n = di->nrxpost - NRXDACTIVE(rxin, rxout);
1072
1073 DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
1074
1075 if (di->rxbufsize > BCMEXTRAHDROOM)
1076 extra_offset = di->rxextrahdrroom;
1077
1078 for (i = 0; i < n; i++) {
1079 /* di->rxbufsize doesn't include the extra headroom; we need to add it to
1080 * the size to be allocated
1081 */
1082
1083 p = osl_pktget(di->osh, di->rxbufsize + extra_offset);
1084
1085 if (p == NULL) {
1086 DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
1087 di->name));
1088 if (i == 0) {
1089 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1090 if (dma64_rxidle(di)) {
1091 DMA_ERROR(("%s: rxfill64: ring is empty !\n", di->name));
1092 ring_empty = TRUE;
1093 }
1094 } else if (DMA32_ENAB(di)) {
1095 if (dma32_rxidle(di)) {
1096 DMA_ERROR(("%s: rxfill32: ring is empty !\n", di->name));
1097 ring_empty = TRUE;
1098 }
1099 } else
1100 ASSERT(0);
1101 }
1102 di->hnddma.rxnobuf++;
1103 break;
1104 }
1105 /* reserve an extra headroom, if applicable */
1106 if (extra_offset)
1107 PKTPULL(p, extra_offset);
1108
1109 /* Do a cached write instead of uncached write since DMA_MAP
1110 * will flush the cache.
1111 */
1112 *(uint32 *) (PKTDATA(p)) = 0;
1113
1114 if (DMASGLIST_ENAB)
1115 bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));
1116
1117 pa = DMA_MAP(di->osh, PKTDATA(p),
1118 di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);
1119
1120 ASSERT(ISALIGNED(PHYSADDRLO(pa), 4));
1121
1122 /* save the free packet pointer */
1123 ASSERT(di->rxp[rxout] == NULL);
1124 di->rxp[rxout] = p;
1125
1126 /* reset flags for each descriptor */
1127 flags = 0;
1128 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1129 if (rxout == (di->nrxd - 1))
1130 flags = D64_CTRL1_EOT;
1131
1132 dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
1133 di->rxbufsize);
1134 } else if (DMA32_ENAB(di)) {
1135 if (rxout == (di->nrxd - 1))
1136 flags = CTRL_EOT;
1137
1138 ASSERT(PHYSADDRHI(pa) == 0);
1139 dma32_dd_upd(di, di->rxd32, pa, rxout, &flags,
1140 di->rxbufsize);
1141 } else
1142 ASSERT(0);
1143 rxout = NEXTRXD(rxout);
1144 }
1145
1146 di->rxout = rxout;
1147
1148 /* update the chip lastdscr pointer */
1149 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1150 W_REG(di->osh, &di->d64rxregs->ptr,
1151 di->rcvptrbase + I2B(rxout, dma64dd_t));
1152 } else if (DMA32_ENAB(di)) {
1153 W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
1154 } else
1155 ASSERT(0);
1156
1157 return ring_empty;
1158 }
1159
1160 /* like getnexttxp but no reclaim */
1161 static void *_dma_peeknexttxp(dma_info_t *di)
1162 {
1163 uint end, i;
1164
1165 if (di->ntxd == 0)
1166 return NULL;
1167
1168 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1169 end =
1170 B2I(((R_REG(di->osh, &di->d64txregs->status0) &
1171 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
1172 dma64dd_t);
1173 } else if (DMA32_ENAB(di)) {
1174 end =
1175 B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
1176 dma32dd_t);
1177 } else
1178 ASSERT(0);
1179
1180 for (i = di->txin; i != end; i = NEXTTXD(i))
1181 if (di->txp[i])
1182 return di->txp[i];
1183
1184 return NULL;
1185 }
1186
1187 /* like getnextrxp but does not take the packet off the ring */
1188 static void *_dma_peeknextrxp(dma_info_t *di)
1189 {
1190 uint end, i;
1191
1192 if (di->nrxd == 0)
1193 return NULL;
1194
1195 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1196 end =
1197 B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
1198 D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
1199 dma64dd_t);
1200 } else if (DMA32_ENAB(di)) {
1201 end =
1202 B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
1203 dma32dd_t);
1204 } else
1205 ASSERT(0);
1206
1207 for (i = di->rxin; i != end; i = NEXTRXD(i))
1208 if (di->rxp[i])
1209 return di->rxp[i];
1210
1211 return NULL;
1212 }
1213
1214 static void _dma_rxreclaim(dma_info_t *di)
1215 {
1216 void *p;
1217
1218 /* "unused local" warning suppression for OSLs that
1219 * define PKTFREE() without using the di->osh arg
1220 */
1221 di = di;
1222
1223 DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
1224
1225 while ((p = _dma_getnextrxp(di, TRUE)))
1226 PKTFREE(di->osh, p, FALSE);
1227 }
1228
1229 static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
1230 {
1231 if (di->nrxd == 0)
1232 return NULL;
1233
1234 if (DMA64_ENAB(di) && DMA64_MODE(di))
1235 return dma64_getnextrxp(di, forceall);
1236 else if (DMA32_ENAB(di))
1237 return dma32_getnextrxp(di, forceall);
1238 ASSERT(0);
1239 return NULL; /* neither DMA engine compiled in */
1240 }
1241
1242 static void _dma_txblock(dma_info_t *di)
1243 {
1244 di->hnddma.txavail = 0;
1245 }
1246
1247 static void _dma_txunblock(dma_info_t *di)
1248 {
1249 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1250 }
1251
1252 static uint _dma_txactive(dma_info_t *di)
1253 {
1254 return NTXDACTIVE(di->txin, di->txout);
1255 }
1256
1257 static uint _dma_txpending(dma_info_t *di)
1258 {
1259 uint curr;
1260
1261 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1262 curr =
1263 B2I(((R_REG(di->osh, &di->d64txregs->status0) &
1264 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
1265 dma64dd_t);
1266 } else if (DMA32_ENAB(di)) {
1267 curr =
1268 B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
1269 dma32dd_t);
1270 } else
1271 ASSERT(0);
1272
1273 return NTXDACTIVE(curr, di->txout);
1274 }
1275
1276 static uint _dma_txcommitted(dma_info_t *di)
1277 {
1278 uint ptr;
1279 uint txin = di->txin;
1280
1281 if (txin == di->txout)
1282 return 0;
1283
1284 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1285 ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
1286 } else if (DMA32_ENAB(di)) {
1287 ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
1288 } else
1289 ASSERT(0);
1290
1291 return NTXDACTIVE(di->txin, ptr);
1292 }
1293
1294 static uint _dma_rxactive(dma_info_t *di)
1295 {
1296 return NRXDACTIVE(di->rxin, di->rxout);
1297 }
1298
1299 static void _dma_counterreset(dma_info_t *di)
1300 {
1301 /* reset all software counters */
1302 di->hnddma.rxgiants = 0;
1303 di->hnddma.rxnobuf = 0;
1304 di->hnddma.txnobuf = 0;
1305 }
1306
1307 static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
1308 {
1309 uint dmactrlflags;
1310 
1311 /* check the handle before dereferencing it; DMA_ERROR() itself uses di */
1312 if (di == NULL)
1313 return 0;
1314 
1315 dmactrlflags = di->hnddma.dmactrlflags;
1316 ASSERT((flags & ~mask) == 0);
1317
1318 dmactrlflags &= ~mask;
1319 dmactrlflags |= flags;
1320
1321 /* If trying to enable parity, check if parity is actually supported */
1322 if (dmactrlflags & DMA_CTRL_PEN) {
1323 uint32 control;
1324
1325 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1326 control = R_REG(di->osh, &di->d64txregs->control);
1327 W_REG(di->osh, &di->d64txregs->control,
1328 control | D64_XC_PD);
1329 if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
1330 /* We *can* disable it so it is supported,
1331 * restore control register
1332 */
1333 W_REG(di->osh, &di->d64txregs->control,
1334 control);
1335 } else {
1336 /* Not supported, don't allow it to be enabled */
1337 dmactrlflags &= ~DMA_CTRL_PEN;
1338 }
1339 } else if (DMA32_ENAB(di)) {
1340 control = R_REG(di->osh, &di->d32txregs->control);
1341 W_REG(di->osh, &di->d32txregs->control,
1342 control | XC_PD);
1343 if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
1344 W_REG(di->osh, &di->d32txregs->control,
1345 control);
1346 } else {
1347 /* Not supported, don't allow it to be enabled */
1348 dmactrlflags &= ~DMA_CTRL_PEN;
1349 }
1350 } else
1351 ASSERT(0);
1352 }
1353
1354 di->hnddma.dmactrlflags = dmactrlflags;
1355
1356 return dmactrlflags;
1357 }
1358
1359 /* get the address of the var in order to change later */
1360 static uintptr _dma_getvar(dma_info_t *di, const char *name)
1361 {
1362 if (!strcmp(name, "&txavail"))
1363 return (uintptr) & (di->hnddma.txavail);
1364 else {
1365 ASSERT(0);
1366 }
1367 return 0;
1368 }
1369
1370 void dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
1371 {
1372 OR_REG(osh, &regs->control, XC_LE);
1373 }
1374
1375 static
1376 uint8 dma_align_sizetobits(uint size)
1377 {
1378 uint8 bitpos = 0;
1379 ASSERT(size);
1380 ASSERT(!(size & (size - 1)));
1381 while (size >>= 1) {
1382 bitpos++;
1383 }
1384 return bitpos;
1385 }
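/*
 * Annotation (worked example, not in the original source): for a power-of-2
 * size this returns log2(size), e.g. dma_align_sizetobits(4096) == 12 and
 * dma_align_sizetobits(8192) == 13; dma_ringalloc() uses the result to retry
 * the ring allocation aligned to the ring's own size.
 */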
1386
1387 /* This function ensures that the DMA descriptor ring will not get allocated
1388 * across a page boundary. If the first allocation does cross the page
1389 * boundary, it is freed and the allocation is redone at a location aligned
1390 * to the descriptor ring size. This ensures that the ring will
1391 * not cross a page boundary
1392 */
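/*
 * Annotation (worked example, not in the original source): the check below
 * masks the start and end addresses of the (aligned) ring with "boundary"
 * and retries only when they differ. For instance, with boundary == 0x1000
 * and size == 0x800, a ring starting at ...0x0c00 ends at ...0x13ff; the two
 * masked values (0x0000 vs 0x1000) differ, so the buffer is freed and
 * re-allocated with *alignbits = dma_align_sizetobits(0x800) == 11, i.e.
 * aligned to the ring size so it can no longer straddle the boundary.
 */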
1393 static void *dma_ringalloc(osl_t *osh, uint32 boundary, uint size,
1394 uint16 *alignbits, uint *alloced,
1395 dmaaddr_t *descpa, osldma_t **dmah)
1396 {
1397 void *va;
1398 uint32 desc_strtaddr;
1399 uint32 alignbytes = 1 << *alignbits;
1400
1401 va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa,
1402 dmah);
1403 if (NULL == va)
1404 return NULL;
1405
1406 desc_strtaddr = (uint32) ROUNDUP((uintptr) va, alignbytes);
1407 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
1408 & boundary)) {
1409 *alignbits = dma_align_sizetobits(size);
1410 DMA_FREE_CONSISTENT(osh, va, size, *descpa, dmah);
1411 va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced,
1412 descpa, dmah);
1413 }
1414 return va;
1415 }
1416
1417 /* 32-bit DMA functions */
1418
1419 static void dma32_txinit(dma_info_t *di)
1420 {
1421 uint32 control = XC_XE;
1422
1423 DMA_TRACE(("%s: dma_txinit\n", di->name));
1424
1425 if (di->ntxd == 0)
1426 return;
1427
1428 di->txin = di->txout = 0;
1429 di->hnddma.txavail = di->ntxd - 1;
1430
1431 /* clear tx descriptor ring */
1432 BZERO_SM((void *)(uintptr) di->txd32, (di->ntxd * sizeof(dma32dd_t)));
1433
1434 if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
1435 control |= XC_PD;
1436 W_REG(di->osh, &di->d32txregs->control, control);
1437 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1438 }
1439
1440 static bool dma32_txenabled(dma_info_t *di)
1441 {
1442 uint32 xc;
1443
1444 /* If the chip is dead, it is not enabled :-) */
1445 xc = R_REG(di->osh, &di->d32txregs->control);
1446 return (xc != 0xffffffff) && (xc & XC_XE);
1447 }
1448
1449 static void dma32_txsuspend(dma_info_t *di)
1450 {
1451 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
1452
1453 if (di->ntxd == 0)
1454 return;
1455
1456 OR_REG(di->osh, &di->d32txregs->control, XC_SE);
1457 }
1458
1459 static void dma32_txresume(dma_info_t *di)
1460 {
1461 DMA_TRACE(("%s: dma_txresume\n", di->name));
1462
1463 if (di->ntxd == 0)
1464 return;
1465
1466 AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
1467 }
1468
1469 static bool dma32_txsuspended(dma_info_t *di)
1470 {
1471 return (di->ntxd == 0)
1472 || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
1473 }
1474
1475 static void dma32_txreclaim(dma_info_t *di, txd_range_t range)
1476 {
1477 void *p;
1478
1479 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
1480 (range == HNDDMA_RANGE_ALL) ? "all" :
1481 ((range ==
1482 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1483 "transfered")));
1484
1485 if (di->txin == di->txout)
1486 return;
1487
1488 while ((p = dma32_getnexttxp(di, range)))
1489 PKTFREE(di->osh, p, TRUE);
1490 }
1491
1492 static bool dma32_txstopped(dma_info_t *di)
1493 {
1494 return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
1495 XS_XS_STOPPED);
1496 }
1497
1498 static bool dma32_rxstopped(dma_info_t *di)
1499 {
1500 return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
1501 RS_RS_STOPPED);
1502 }
1503
1504 static bool dma32_alloc(dma_info_t *di, uint direction)
1505 {
1506 uint size;
1507 uint ddlen;
1508 void *va;
1509 uint alloced;
1510 uint16 align;
1511 uint16 align_bits;
1512
1513 ddlen = sizeof(dma32dd_t);
1514
1515 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
1516
1517 alloced = 0;
1518 align_bits = di->dmadesc_align;
1519 align = (1 << align_bits);
1520
1521 if (direction == DMA_TX) {
1522 va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
1523 &alloced, &di->txdpaorig, &di->tx_dmah);
1524 if (va == NULL) {
1525 DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
1526 return FALSE;
1527 }
1528
1529 PHYSADDRHISET(di->txdpa, 0);
1530 ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
1531 di->txd32 = (dma32dd_t *) ROUNDUP((uintptr) va, align);
1532 di->txdalign =
1533 (uint) ((int8 *) (uintptr) di->txd32 - (int8 *) va);
1534
1535 PHYSADDRLOSET(di->txdpa,
1536 PHYSADDRLO(di->txdpaorig) + di->txdalign);
1537 /* Make sure that alignment didn't overflow */
1538 ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));
1539
1540 di->txdalloc = alloced;
1541 ASSERT(ISALIGNED((uintptr) di->txd32, align));
1542 } else {
1543 va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
1544 &alloced, &di->rxdpaorig, &di->rx_dmah);
1545 if (va == NULL) {
1546 DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
1547 return FALSE;
1548 }
1549
1550 PHYSADDRHISET(di->rxdpa, 0);
1551 ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
1552 di->rxd32 = (dma32dd_t *) ROUNDUP((uintptr) va, align);
1553 di->rxdalign =
1554 (uint) ((int8 *) (uintptr) di->rxd32 - (int8 *) va);
1555
1556 PHYSADDRLOSET(di->rxdpa,
1557 PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
1558 /* Make sure that alignment didn't overflow */
1559 ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
1560 di->rxdalloc = alloced;
1561 ASSERT(ISALIGNED((uintptr) di->rxd32, align));
1562 }
1563
1564 return TRUE;
1565 }
1566
1567 static bool dma32_txreset(dma_info_t *di)
1568 {
1569 uint32 status;
1570
1571 if (di->ntxd == 0)
1572 return TRUE;
1573
1574 /* suspend tx DMA first */
1575 W_REG(di->osh, &di->d32txregs->control, XC_SE);
1576 SPINWAIT(((status =
1577 (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
1578 != XS_XS_DISABLED) && (status != XS_XS_IDLE)
1579 && (status != XS_XS_STOPPED), (10000));
1580
1581 W_REG(di->osh, &di->d32txregs->control, 0);
1582 SPINWAIT(((status = (R_REG(di->osh,
1583 &di->d32txregs->status) & XS_XS_MASK)) !=
1584 XS_XS_DISABLED), 10000);
1585
1586 /* wait for the last transaction to complete */
1587 OSL_DELAY(300);
1588
1589 return status == XS_XS_DISABLED;
1590 }
1591
1592 static bool dma32_rxidle(dma_info_t *di)
1593 {
1594 DMA_TRACE(("%s: dma_rxidle\n", di->name));
1595
1596 if (di->nrxd == 0)
1597 return TRUE;
1598
1599 return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
1600 R_REG(di->osh, &di->d32rxregs->ptr));
1601 }
1602
1603 static bool dma32_rxreset(dma_info_t *di)
1604 {
1605 uint32 status;
1606
1607 if (di->nrxd == 0)
1608 return TRUE;
1609
1610 W_REG(di->osh, &di->d32rxregs->control, 0);
1611 SPINWAIT(((status = (R_REG(di->osh,
1612 &di->d32rxregs->status) & RS_RS_MASK)) !=
1613 RS_RS_DISABLED), 10000);
1614
1615 return status == RS_RS_DISABLED;
1616 }
1617
1618 static bool dma32_rxenabled(dma_info_t *di)
1619 {
1620 uint32 rc;
1621
1622 rc = R_REG(di->osh, &di->d32rxregs->control);
1623 return (rc != 0xffffffff) && (rc & RC_RE);
1624 }
1625
1626 static bool dma32_txsuspendedidle(dma_info_t *di)
1627 {
1628 if (di->ntxd == 0)
1629 return TRUE;
1630
1631 if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
1632 return 0;
1633
1634 if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
1635 return 0;
1636
1637 OSL_DELAY(2);
1638 return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
1639 XS_XS_IDLE);
1640 }
1641
1642 /* !! tx entry routine
1643 * supports full 32-bit dma engine buffer addressing so
1644 * dma buffers can cross 4 Kbyte page boundaries.
1645 *
1646 * WARNING: the caller must check the return value for errors.
1647 * An error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
1648 */
1649 static int dma32_txfast(dma_info_t *di, void *p0, bool commit)
1650 {
1651 void *p, *next;
1652 uchar *data;
1653 uint len;
1654 uint16 txout;
1655 uint32 flags = 0;
1656 dmaaddr_t pa;
1657
1658 DMA_TRACE(("%s: dma_txfast\n", di->name));
1659
1660 txout = di->txout;
1661
1662 /*
1663 * Walk the chain of packet buffers
1664 * allocating and initializing transmit descriptor entries.
1665 */
1666 for (p = p0; p; p = next) {
1667 uint nsegs, j;
1668 hnddma_seg_map_t *map;
1669
1670 data = PKTDATA(p);
1671 len = PKTLEN(p);
1672 #ifdef BCM_DMAPAD
1673 len += PKTDMAPAD(di->osh, p);
1674 #endif
1675 next = PKTNEXT(p);
1676
1677 /* return nonzero if out of tx descriptors */
1678 if (NEXTTXD(txout) == di->txin)
1679 goto outoftxd;
1680
1681 if (len == 0)
1682 continue;
1683
1684 if (DMASGLIST_ENAB)
1685 bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
1686
1687 /* get physical address of buffer start */
1688 pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
1689 &di->txp_dmah[txout]);
1690
1691 if (DMASGLIST_ENAB) {
1692 map = &di->txp_dmah[txout];
1693
1694 /* See if all the segments can be accounted for */
1695 if (map->nsegs >
1696 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
1697 1))
1698 goto outoftxd;
1699
1700 nsegs = map->nsegs;
1701 } else
1702 nsegs = 1;
1703
1704 for (j = 1; j <= nsegs; j++) {
1705 flags = 0;
1706 if (p == p0 && j == 1)
1707 flags |= CTRL_SOF;
1708
1709 /* With a DMA segment list, Descriptor table is filled
1710 * using the segment list instead of looping over
1711 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
1712 * end of segment list is reached.
1713 */
1714 if ((!DMASGLIST_ENAB && next == NULL) ||
1715 (DMASGLIST_ENAB && j == nsegs))
1716 flags |= (CTRL_IOC | CTRL_EOF);
1717 if (txout == (di->ntxd - 1))
1718 flags |= CTRL_EOT;
1719
1720 if (DMASGLIST_ENAB) {
1721 len = map->segs[j - 1].length;
1722 pa = map->segs[j - 1].addr;
1723 }
1724 ASSERT(PHYSADDRHI(pa) == 0);
1725
1726 dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
1727 ASSERT(di->txp[txout] == NULL);
1728
1729 txout = NEXTTXD(txout);
1730 }
1731
1732 /* See above. No need to loop over individual buffers */
1733 if (DMASGLIST_ENAB)
1734 break;
1735 }
1736
1737 /* if last txd eof not set, fix it */
1738 if (!(flags & CTRL_EOF))
1739 W_SM(&di->txd32[PREVTXD(txout)].ctrl,
1740 BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));
1741
1742 /* save the packet */
1743 di->txp[PREVTXD(txout)] = p0;
1744
1745 /* bump the tx descriptor index */
1746 di->txout = txout;
1747
1748 /* kick the chip */
1749 if (commit)
1750 W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));
1751
1752 /* tx flow control */
1753 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1754
1755 return 0;
1756
1757 outoftxd:
1758 DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
1759 PKTFREE(di->osh, p0, TRUE);
1760 di->hnddma.txavail = 0;
1761 di->hnddma.txnobuf++;
1762 return -1;
1763 }
1764
1765 /*
1766 * Reclaim the next completed txd (txds if using chained buffers) in the range
1767 * specified and return the associated packet.
1768 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
1769 * transmitted as noted by the hardware "CurrDescr" pointer.
1770 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
1771 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
1772 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
1773 * return the associated packets regardless of the value of the hardware pointers.
1774 */
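/*
 * Annotation (illustrative, not in the original source): with txin == 2 and
 * txout == 7, and the hardware reporting CurrDescr == 4 and ActiveDescr == 6,
 * HNDDMA_RANGE_TRANSMITTED reclaims descriptors 2..3,
 * HNDDMA_RANGE_TRANSFERED reclaims 2..4 (up to the one before ActiveDescr),
 * and HNDDMA_RANGE_ALL reclaims 2..6 regardless of the hardware pointers.
 */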
1775 static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range)
1776 {
1777 uint16 start, end, i;
1778 uint16 active_desc;
1779 void *txp;
1780
1781 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
1782 (range == HNDDMA_RANGE_ALL) ? "all" :
1783 ((range ==
1784 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1785 "transfered")));
1786
1787 if (di->ntxd == 0)
1788 return NULL;
1789
1790 txp = NULL;
1791
1792 start = di->txin;
1793 if (range == HNDDMA_RANGE_ALL)
1794 end = di->txout;
1795 else {
1796 dma32regs_t *dregs = di->d32txregs;
1797
1798 end =
1799 (uint16) B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK,
1800 dma32dd_t);
1801
1802 if (range == HNDDMA_RANGE_TRANSFERED) {
1803 active_desc =
1804 (uint16) ((R_REG(di->osh, &dregs->status) &
1805 XS_AD_MASK) >> XS_AD_SHIFT);
1806 active_desc = (uint16) B2I(active_desc, dma32dd_t);
1807 if (end != active_desc)
1808 end = PREVTXD(active_desc);
1809 }
1810 }
1811
1812 if ((start == 0) && (end > di->txout))
1813 goto bogus;
1814
1815 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
1816 dmaaddr_t pa;
1817 hnddma_seg_map_t *map = NULL;
1818 uint size, j, nsegs;
1819
1820 PHYSADDRLOSET(pa,
1821 (BUS_SWAP32(R_SM(&di->txd32[i].addr)) -
1822 di->dataoffsetlow));
1823 PHYSADDRHISET(pa, 0);
1824
1825 if (DMASGLIST_ENAB) {
1826 map = &di->txp_dmah[i];
1827 size = map->origsize;
1828 nsegs = map->nsegs;
1829 } else {
1830 size =
1831 (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) &
1832 CTRL_BC_MASK);
1833 nsegs = 1;
1834 }
1835
1836 for (j = nsegs; j > 0; j--) {
1837 W_SM(&di->txd32[i].addr, 0xdeadbeef);
1838
1839 txp = di->txp[i];
1840 di->txp[i] = NULL;
1841 if (j > 1)
1842 i = NEXTTXD(i);
1843 }
1844
1845 DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
1846 }
1847
1848 di->txin = i;
1849
1850 /* tx flow control */
1851 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1852
1853 return txp;
1854
1855 bogus:
1856 	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d range %d\n", start, end, di->txout, range));
1857 return NULL;
1858 }
1859
1860 static void *dma32_getnextrxp(dma_info_t *di, bool forceall)
1861 {
1862 uint i, curr;
1863 void *rxp;
1864 dmaaddr_t pa;
1865 /* if forcing, dma engine must be disabled */
1866 ASSERT(!forceall || !dma32_rxenabled(di));
1867
1868 i = di->rxin;
1869
1870 /* return if no packets posted */
1871 if (i == di->rxout)
1872 return NULL;
1873
1874 curr =
1875 B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);
1876
1877 /* ignore curr if forceall */
1878 if (!forceall && (i == curr))
1879 return NULL;
1880
1881 /* get the packet pointer that corresponds to the rx descriptor */
1882 rxp = di->rxp[i];
1883 ASSERT(rxp);
1884 di->rxp[i] = NULL;
1885
1886 PHYSADDRLOSET(pa,
1887 (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) -
1888 di->dataoffsetlow));
1889 PHYSADDRHISET(pa, 0);
1890
1891 /* clear this packet from the descriptor ring */
1892 DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
1893
1894 W_SM(&di->rxd32[i].addr, 0xdeadbeef);
1895
1896 di->rxin = NEXTRXD(i);
1897
1898 return rxp;
1899 }
1900
1901 /*
1902 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
1903 */
1904 static void dma32_txrotate(dma_info_t *di)
1905 {
1906 uint16 ad;
1907 uint nactive;
1908 uint rot;
1909 uint16 old, new;
1910 uint32 w;
1911 uint16 first, last;
1912
1913 ASSERT(dma32_txsuspendedidle(di));
1914
1915 nactive = _dma_txactive(di);
1916 ad = (uint16) (B2I
1917 (((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK)
1918 >> XS_AD_SHIFT), dma32dd_t));
1919 rot = TXD(ad - di->txin);
1920
1921 ASSERT(rot < di->ntxd);
1922
1923 /* full-ring case is a lot harder - don't worry about this */
1924 if (rot >= (di->ntxd - nactive)) {
1925 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
1926 return;
1927 }
1928
1929 first = di->txin;
1930 last = PREVTXD(di->txout);
1931
1932 /* move entries starting at last and moving backwards to first */
1933 for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
1934 new = TXD(old + rot);
1935
1936 /*
1937 * Move the tx dma descriptor.
1938 * EOT is set only in the last entry in the ring.
1939 */
1940 w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
1941 if (new == (di->ntxd - 1))
1942 w |= CTRL_EOT;
1943 W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
1944 W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));
1945
1946 /* zap the old tx dma descriptor address field */
1947 W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));
1948
1949 /* move the corresponding txp[] entry */
1950 ASSERT(di->txp[new] == NULL);
1951 di->txp[new] = di->txp[old];
1952
1953 /* Move the segment map as well */
1954 if (DMASGLIST_ENAB) {
1955 bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
1956 sizeof(hnddma_seg_map_t));
1957 bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
1958 }
1959
1960 di->txp[old] = NULL;
1961 }
1962
1963 /* update txin and txout */
1964 di->txin = ad;
1965 di->txout = TXD(di->txout + rot);
1966 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1967
1968 /* kick the chip */
1969 W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
1970 }
1971
1972 /* 64-bit DMA functions */
1973
1974 static void dma64_txinit(dma_info_t *di)
1975 {
1976 uint32 control = D64_XC_XE;
1977
1978 DMA_TRACE(("%s: dma_txinit\n", di->name));
1979
1980 if (di->ntxd == 0)
1981 return;
1982
1983 di->txin = di->txout = 0;
1984 di->hnddma.txavail = di->ntxd - 1;
1985
1986 /* clear tx descriptor ring */
1987 BZERO_SM((void *)(uintptr) di->txd64, (di->ntxd * sizeof(dma64dd_t)));
1988
1989 	/* DMA engine without alignment requirement requires table to be initialized
1990 	 * before enabling the engine
1991 */
1992 if (!di->aligndesc_4k)
1993 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1994
1995 if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
1996 control |= D64_XC_PD;
1997 OR_REG(di->osh, &di->d64txregs->control, control);
1998
1999 	/* DMA engine with alignment requirement requires table to be initialized
2000 	 * after enabling the engine
2001 */
2002 if (di->aligndesc_4k)
2003 _dma_ddtable_init(di, DMA_TX, di->txdpa);
2004 }
2005
2006 static bool dma64_txenabled(dma_info_t *di)
2007 {
2008 uint32 xc;
2009
2010 /* If the chip is dead, it is not enabled :-) */
2011 xc = R_REG(di->osh, &di->d64txregs->control);
2012 return (xc != 0xffffffff) && (xc & D64_XC_XE);
2013 }
2014
2015 static void dma64_txsuspend(dma_info_t *di)
2016 {
2017 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
2018
2019 if (di->ntxd == 0)
2020 return;
2021
2022 OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
2023 }
2024
2025 static void dma64_txresume(dma_info_t *di)
2026 {
2027 DMA_TRACE(("%s: dma_txresume\n", di->name));
2028
2029 if (di->ntxd == 0)
2030 return;
2031
2032 AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
2033 }
2034
2035 static bool dma64_txsuspended(dma_info_t *di)
2036 {
2037 return (di->ntxd == 0) ||
2038 ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) ==
2039 D64_XC_SE);
2040 }
2041
2042 static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
2043 {
2044 void *p;
2045
2046 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
2047 (range == HNDDMA_RANGE_ALL) ? "all" :
2048 ((range ==
2049 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
2050 "transfered")));
2051
2052 if (di->txin == di->txout)
2053 return;
2054
2055 while ((p = dma64_getnexttxp(di, range))) {
2056 /* For unframed data, we don't have any packets to free */
2057 if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
2058 PKTFREE(di->osh, p, TRUE);
2059 }
2060 }
2061
2062 static bool dma64_txstopped(dma_info_t *di)
2063 {
2064 return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
2065 D64_XS0_XS_STOPPED);
2066 }
2067
2068 static bool dma64_rxstopped(dma_info_t *di)
2069 {
2070 return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
2071 D64_RS0_RS_STOPPED);
2072 }
2073
2074 static bool dma64_alloc(dma_info_t *di, uint direction)
2075 {
2076 uint16 size;
2077 uint ddlen;
2078 void *va;
2079 uint alloced = 0;
2080 uint16 align;
2081 uint16 align_bits;
2082
2083 ddlen = sizeof(dma64dd_t);
2084
2085 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
2086 align_bits = di->dmadesc_align;
2087 align = (1 << align_bits);
2088
2089 if (direction == DMA_TX) {
2090 va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
2091 &alloced, &di->txdpaorig, &di->tx_dmah);
2092 if (va == NULL) {
2093 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
2094 return FALSE;
2095 }
2096 align = (1 << align_bits);
2097 di->txd64 = (dma64dd_t *) ROUNDUP((uintptr) va, align);
2098 di->txdalign =
2099 (uint) ((int8 *) (uintptr) di->txd64 - (int8 *) va);
2100 PHYSADDRLOSET(di->txdpa,
2101 PHYSADDRLO(di->txdpaorig) + di->txdalign);
2102 /* Make sure that alignment didn't overflow */
2103 ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));
2104
2105 PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
2106 di->txdalloc = alloced;
2107 ASSERT(ISALIGNED((uintptr) di->txd64, align));
2108 } else {
2109 va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
2110 &alloced, &di->rxdpaorig, &di->rx_dmah);
2111 if (va == NULL) {
2112 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
2113 return FALSE;
2114 }
2115 align = (1 << align_bits);
2116 di->rxd64 = (dma64dd_t *) ROUNDUP((uintptr) va, align);
2117 di->rxdalign =
2118 (uint) ((int8 *) (uintptr) di->rxd64 - (int8 *) va);
2119 PHYSADDRLOSET(di->rxdpa,
2120 PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
2121 /* Make sure that alignment didn't overflow */
2122 ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
2123
2124 PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
2125 di->rxdalloc = alloced;
2126 ASSERT(ISALIGNED((uintptr) di->rxd64, align));
2127 }
2128
2129 return TRUE;
2130 }
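
/*
 * Illustrative sketch only, not part of the original driver (hypothetical
 * HNDDMA_USAGE_EXAMPLES guard and example_* name): the alignment bookkeeping
 * in dma64_alloc() keeps the virtual and physical descriptor bases in step,
 * i.e. the low 32 bits of txdpa are advanced by exactly the round-up offset
 * recorded in txdalign.
 */
#ifdef HNDDMA_USAGE_EXAMPLES
static bool example_txring_offsets_consistent(dma_info_t *di)
{
	/* the VA round-up offset and the PA adjustment made in
	 * dma64_alloc() must agree
	 */
	return PHYSADDRLO(di->txdpa) ==
	    (PHYSADDRLO(di->txdpaorig) + di->txdalign);
}
#endif /* HNDDMA_USAGE_EXAMPLES */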
2131
2132 static bool dma64_txreset(dma_info_t *di)
2133 {
2134 uint32 status;
2135
2136 if (di->ntxd == 0)
2137 return TRUE;
2138
2139 /* suspend tx DMA first */
2140 W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
2141 SPINWAIT(((status =
2142 (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
2143 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
2144 && (status != D64_XS0_XS_STOPPED), 10000);
2145
2146 W_REG(di->osh, &di->d64txregs->control, 0);
2147 SPINWAIT(((status =
2148 (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
2149 != D64_XS0_XS_DISABLED), 10000);
2150
2151 /* wait for the last transaction to complete */
2152 OSL_DELAY(300);
2153
2154 return status == D64_XS0_XS_DISABLED;
2155 }
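
/*
 * Illustrative sketch only, not part of the original driver (hypothetical
 * HNDDMA_USAGE_EXAMPLES guard and example_* name): one plausible teardown
 * ordering is to disable the tx engine first, so the hardware cannot touch
 * buffers while they are being reclaimed, and then force-reclaim every
 * posted descriptor.
 */
#ifdef HNDDMA_USAGE_EXAMPLES
static void example_dma64_txflush(dma_info_t *di)
{
	if (!dma64_txreset(di)) {
		DMA_ERROR(("%s: %s: tx engine did not disable\n",
			   di->name, __func__));
	}

	/* reclaim everything still posted, regardless of hardware state */
	dma64_txreclaim(di, HNDDMA_RANGE_ALL);
}
#endif /* HNDDMA_USAGE_EXAMPLES */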
2156
2157 static bool dma64_rxidle(dma_info_t *di)
2158 {
2159 DMA_TRACE(("%s: dma_rxidle\n", di->name));
2160
2161 if (di->nrxd == 0)
2162 return TRUE;
2163
2164 return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
2165 (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
2166 }
2167
2168 static bool dma64_rxreset(dma_info_t *di)
2169 {
2170 uint32 status;
2171
2172 if (di->nrxd == 0)
2173 return TRUE;
2174
2175 W_REG(di->osh, &di->d64rxregs->control, 0);
2176 SPINWAIT(((status =
2177 (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK))
2178 != D64_RS0_RS_DISABLED), 10000);
2179
2180 return status == D64_RS0_RS_DISABLED;
2181 }
2182
2183 static bool dma64_rxenabled(dma_info_t *di)
2184 {
2185 uint32 rc;
2186
2187 rc = R_REG(di->osh, &di->d64rxregs->control);
2188 return (rc != 0xffffffff) && (rc & D64_RC_RE);
2189 }
2190
2191 static bool dma64_txsuspendedidle(dma_info_t *di)
2192 {
2193
2194 if (di->ntxd == 0)
2195 return TRUE;
2196
2197 if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
2198 return 0;
2199
2200 if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
2201 D64_XS0_XS_IDLE)
2202 return 1;
2203
2204 return 0;
2205 }
2206
2207 /* Useful when sending unframed data. This allows us to get a progress report from the DMA.
2208 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
2209 * If DMA is idle, we return NULL.
2210 */
2211 static void *dma64_getpos(dma_info_t *di, bool direction)
2212 {
2213 void *va;
2214 bool idle;
2215 uint32 cd_offset;
2216
2217 if (direction == DMA_TX) {
2218 cd_offset =
2219 R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK;
2220 idle = !NTXDACTIVE(di->txin, di->txout);
2221 va = di->txp[B2I(cd_offset, dma64dd_t)];
2222 } else {
2223 cd_offset =
2224 R_REG(di->osh, &di->d64rxregs->status0) & D64_XS0_CD_MASK;
2225 idle = !NRXDACTIVE(di->rxin, di->rxout);
2226 va = di->rxp[B2I(cd_offset, dma64dd_t)];
2227 }
2228
2229 /* If DMA is IDLE, return NULL */
2230 if (idle) {
2231 DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
2232 va = NULL;
2233 }
2234
2235 return va;
2236 }
2237
2238 /* TX of unframed data
2239 *
2240 * Adds a DMA ring descriptor for the data pointed to by "buf".
2241  * This is for DMA of a buffer of data and is unlike other hnddma TX functions
2242  * that take a pointer to a "packet".
2243  * Each call to this results in a single descriptor being added for "len" bytes of
2244  * data starting at "buf"; it doesn't handle chained buffers.
2245 */
2246 static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
2247 {
2248 uint16 txout;
2249 uint32 flags = 0;
2250 dmaaddr_t pa; /* phys addr */
2251
2252 txout = di->txout;
2253
2254 /* return nonzero if out of tx descriptors */
2255 if (NEXTTXD(txout) == di->txin)
2256 goto outoftxd;
2257
2258 if (len == 0)
2259 return 0;
2260
2261 pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);
2262
2263 flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);
2264
2265 if (txout == (di->ntxd - 1))
2266 flags |= D64_CTRL1_EOT;
2267
2268 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
2269 ASSERT(di->txp[txout] == NULL);
2270
2271 /* save the buffer pointer - used by dma_getpos */
2272 di->txp[txout] = buf;
2273
2274 txout = NEXTTXD(txout);
2275 /* bump the tx descriptor index */
2276 di->txout = txout;
2277
2278 /* kick the chip */
2279 if (commit) {
2280 W_REG(di->osh, &di->d64txregs->ptr,
2281 di->xmtptrbase + I2B(txout, dma64dd_t));
2282 }
2283
2284 /* tx flow control */
2285 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2286
2287 return 0;
2288
2289 outoftxd:
2290 DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
2291 di->hnddma.txavail = 0;
2292 di->hnddma.txnobuf++;
2293 return -1;
2294 }
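
/*
 * Illustrative sketch only, not part of the original driver (hypothetical
 * HNDDMA_USAGE_EXAMPLES guard and example_* name): how unframed TX and
 * dma64_getpos() fit together. One descriptor is posted per call and the
 * buffer must stay valid until it is reclaimed; dma64_getpos() then reports
 * which data buffer the engine is currently working on, or NULL once it has
 * gone idle.
 */
#ifdef HNDDMA_USAGE_EXAMPLES
static void *example_dma64_unframed_post(dma_info_t *di, void *buf, uint len)
{
	/* post and commit a single unframed buffer */
	if (dma64_txunframed(di, buf, len, TRUE) != 0)
		return NULL;

	/* progress report: the current descriptor's data pointer,
	 * or NULL if the engine is already idle
	 */
	return dma64_getpos(di, DMA_TX);
}
#endif /* HNDDMA_USAGE_EXAMPLES */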
2295
2296 /* !! tx entry routine
2297  * WARNING: the caller must check the return value for errors.
2298  * An error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
2299 */
2300 static int BCMFASTPATH dma64_txfast(dma_info_t *di, void *p0, bool commit)
2301 {
2302 void *p, *next;
2303 uchar *data;
2304 uint len;
2305 uint16 txout;
2306 uint32 flags = 0;
2307 dmaaddr_t pa;
2308
2309 DMA_TRACE(("%s: dma_txfast\n", di->name));
2310
2311 txout = di->txout;
2312
2313 /*
2314 * Walk the chain of packet buffers
2315 * allocating and initializing transmit descriptor entries.
2316 */
2317 for (p = p0; p; p = next) {
2318 uint nsegs, j;
2319 hnddma_seg_map_t *map;
2320
2321 data = PKTDATA(p);
2322 len = PKTLEN(p);
2323 #ifdef BCM_DMAPAD
2324 len += PKTDMAPAD(di->osh, p);
2325 #endif /* BCM_DMAPAD */
2326 next = PKTNEXT(p);
2327
2328 /* return nonzero if out of tx descriptors */
2329 if (NEXTTXD(txout) == di->txin)
2330 goto outoftxd;
2331
2332 if (len == 0)
2333 continue;
2334
2335 /* get physical address of buffer start */
2336 if (DMASGLIST_ENAB)
2337 bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
2338
2339 pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
2340 &di->txp_dmah[txout]);
2341
2342 if (DMASGLIST_ENAB) {
2343 map = &di->txp_dmah[txout];
2344
2345 /* See if all the segments can be accounted for */
2346 if (map->nsegs >
2347 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
2348 1))
2349 goto outoftxd;
2350
2351 nsegs = map->nsegs;
2352 } else
2353 nsegs = 1;
2354
2355 for (j = 1; j <= nsegs; j++) {
2356 flags = 0;
2357 if (p == p0 && j == 1)
2358 flags |= D64_CTRL1_SOF;
2359
2360 			/* With a DMA segment list, the descriptor table is filled
2361 			 * using the segment list instead of looping over
2362 			 * buffers in multi-chain DMA. Therefore, for SGLIST, EOF is set
2363 			 * when the end of the segment list is reached.
2364 */
2365 if ((!DMASGLIST_ENAB && next == NULL) ||
2366 (DMASGLIST_ENAB && j == nsegs))
2367 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
2368 if (txout == (di->ntxd - 1))
2369 flags |= D64_CTRL1_EOT;
2370
2371 if (DMASGLIST_ENAB) {
2372 len = map->segs[j - 1].length;
2373 pa = map->segs[j - 1].addr;
2374 }
2375 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
2376 ASSERT(di->txp[txout] == NULL);
2377
2378 txout = NEXTTXD(txout);
2379 }
2380
2381 /* See above. No need to loop over individual buffers */
2382 if (DMASGLIST_ENAB)
2383 break;
2384 }
2385
2386 /* if last txd eof not set, fix it */
2387 if (!(flags & D64_CTRL1_EOF))
2388 W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
2389 BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
2390
2391 /* save the packet */
2392 di->txp[PREVTXD(txout)] = p0;
2393
2394 /* bump the tx descriptor index */
2395 di->txout = txout;
2396
2397 /* kick the chip */
2398 if (commit)
2399 W_REG(di->osh, &di->d64txregs->ptr,
2400 di->xmtptrbase + I2B(txout, dma64dd_t));
2401
2402 /* tx flow control */
2403 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2404
2405 return 0;
2406
2407 outoftxd:
2408 DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
2409 PKTFREE(di->osh, p0, TRUE);
2410 di->hnddma.txavail = 0;
2411 di->hnddma.txnobuf++;
2412 return -1;
2413 }
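
/*
 * Illustrative sketch only, not part of the original driver (hypothetical
 * HNDDMA_USAGE_EXAMPLES guard and example_* name): per the warning above,
 * callers of dma64_txfast() must honor the txavail flow-control count and
 * check the return value; on failure the packet chain has already been
 * freed, so the caller must not free or retry the same packet.
 */
#ifdef HNDDMA_USAGE_EXAMPLES
static int example_dma64_send(dma_info_t *di, void *p)
{
	/* back off if no descriptors are advertised as available */
	if (di->hnddma.txavail == 0)
		return -1;

	/* post and commit; a negative return means the packet was tossed */
	return dma64_txfast(di, p, TRUE);
}
#endif /* HNDDMA_USAGE_EXAMPLES */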
2414
2415 /*
2416 * Reclaim next completed txd (txds if using chained buffers) in the range
2417 * specified and return associated packet.
2418  * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
2419  * transmitted as noted by the hardware "CurrDescr" pointer.
2420  * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
2421  * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
2422 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
2423 * return associated packet regardless of the value of hardware pointers.
2424 */
2425 static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range)
2426 {
2427 uint16 start, end, i;
2428 uint16 active_desc;
2429 void *txp;
2430
2431 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
2432 (range == HNDDMA_RANGE_ALL) ? "all" :
2433 ((range ==
2434 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
2435 "transfered")));
2436
2437 if (di->ntxd == 0)
2438 return NULL;
2439
2440 txp = NULL;
2441
2442 start = di->txin;
2443 if (range == HNDDMA_RANGE_ALL)
2444 end = di->txout;
2445 else {
2446 dma64regs_t *dregs = di->d64txregs;
2447
2448 end =
2449 (uint16) (B2I
2450 (((R_REG(di->osh, &dregs->status0) &
2451 D64_XS0_CD_MASK) -
2452 di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));
2453
2454 if (range == HNDDMA_RANGE_TRANSFERED) {
2455 active_desc =
2456 (uint16) (R_REG(di->osh, &dregs->status1) &
2457 D64_XS1_AD_MASK);
2458 active_desc =
2459 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
2460 active_desc = B2I(active_desc, dma64dd_t);
2461 if (end != active_desc)
2462 end = PREVTXD(active_desc);
2463 }
2464 }
2465
2466 if ((start == 0) && (end > di->txout))
2467 goto bogus;
2468
2469 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
2470 dmaaddr_t pa;
2471 hnddma_seg_map_t *map = NULL;
2472 uint size, j, nsegs;
2473
2474 PHYSADDRLOSET(pa,
2475 (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
2476 di->dataoffsetlow));
2477 PHYSADDRHISET(pa,
2478 (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
2479 di->dataoffsethigh));
2480
2481 if (DMASGLIST_ENAB) {
2482 map = &di->txp_dmah[i];
2483 size = map->origsize;
2484 nsegs = map->nsegs;
2485 } else {
2486 size =
2487 (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
2488 D64_CTRL2_BC_MASK);
2489 nsegs = 1;
2490 }
2491
2492 for (j = nsegs; j > 0; j--) {
2493 W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
2494 W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
2495
2496 txp = di->txp[i];
2497 di->txp[i] = NULL;
2498 if (j > 1)
2499 i = NEXTTXD(i);
2500 }
2501
2502 DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
2503 }
2504
2505 di->txin = i;
2506
2507 /* tx flow control */
2508 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2509
2510 return txp;
2511
2512 bogus:
2513 	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d range %d\n", start, end, di->txout, range));
2514 return NULL;
2515 }
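
/*
 * Illustrative sketch only, not part of the original driver (hypothetical
 * HNDDMA_USAGE_EXAMPLES guard and example_* name): a typical tx-completion
 * path reclaims with HNDDMA_RANGE_TRANSMITTED and frees each returned
 * packet, while HNDDMA_RANGE_ALL is reserved for flushing a stopped ring
 * (see dma64_txreclaim() above). Unframed buffers would not be PKTFREE'd.
 */
#ifdef HNDDMA_USAGE_EXAMPLES
static void example_dma64_txcomplete(dma_info_t *di)
{
	void *p;

	/* reclaim only descriptors the hardware reports as transmitted */
	while ((p = dma64_getnexttxp(di, HNDDMA_RANGE_TRANSMITTED)) != NULL)
		PKTFREE(di->osh, p, TRUE);
}
#endif /* HNDDMA_USAGE_EXAMPLES */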
2516
2517 static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
2518 {
2519 uint i, curr;
2520 void *rxp;
2521 dmaaddr_t pa;
2522
2523 /* if forcing, dma engine must be disabled */
2524 ASSERT(!forceall || !dma64_rxenabled(di));
2525
2526 i = di->rxin;
2527
2528 /* return if no packets posted */
2529 if (i == di->rxout)
2530 return NULL;
2531
2532 curr =
2533 B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
2534 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
2535
2536 /* ignore curr if forceall */
2537 if (!forceall && (i == curr))
2538 return NULL;
2539
2540 /* get the packet pointer that corresponds to the rx descriptor */
2541 rxp = di->rxp[i];
2542 ASSERT(rxp);
2543 di->rxp[i] = NULL;
2544
2545 PHYSADDRLOSET(pa,
2546 (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
2547 di->dataoffsetlow));
2548 PHYSADDRHISET(pa,
2549 (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
2550 di->dataoffsethigh));
2551
2552 /* clear this packet from the descriptor ring */
2553 DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
2554
2555 W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
2556 W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
2557
2558 di->rxin = NEXTRXD(i);
2559
2560 return rxp;
2561 }
2562
2563 static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs)
2564 {
2565 uint32 w;
2566 OR_REG(osh, &dma64regs->control, D64_XC_AE);
2567 w = R_REG(osh, &dma64regs->control);
2568 AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
2569 return (w & D64_XC_AE) == D64_XC_AE;
2570 }
2571
2572 /*
2573 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
2574 */
2575 static void dma64_txrotate(dma_info_t *di)
2576 {
2577 uint16 ad;
2578 uint nactive;
2579 uint rot;
2580 uint16 old, new;
2581 uint32 w;
2582 uint16 first, last;
2583
2584 ASSERT(dma64_txsuspendedidle(di));
2585
2586 nactive = _dma_txactive(di);
2587 ad = (uint16) (B2I
2588 ((((R_REG(di->osh, &di->d64txregs->status1) &
2589 D64_XS1_AD_MASK)
2590 - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
2591 rot = TXD(ad - di->txin);
2592
2593 ASSERT(rot < di->ntxd);
2594
2595 /* full-ring case is a lot harder - don't worry about this */
2596 if (rot >= (di->ntxd - nactive)) {
2597 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
2598 return;
2599 }
2600
2601 first = di->txin;
2602 last = PREVTXD(di->txout);
2603
2604 /* move entries starting at last and moving backwards to first */
2605 for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
2606 new = TXD(old + rot);
2607
2608 /*
2609 * Move the tx dma descriptor.
2610 * EOT is set only in the last entry in the ring.
2611 */
2612 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
2613 if (new == (di->ntxd - 1))
2614 w |= D64_CTRL1_EOT;
2615 W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));
2616
2617 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
2618 W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));
2619
2620 W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
2621 W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
2622
2623 /* zap the old tx dma descriptor address field */
2624 W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
2625 W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));
2626
2627 /* move the corresponding txp[] entry */
2628 ASSERT(di->txp[new] == NULL);
2629 di->txp[new] = di->txp[old];
2630
2631 /* Move the map */
2632 if (DMASGLIST_ENAB) {
2633 bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
2634 sizeof(hnddma_seg_map_t));
2635 bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
2636 }
2637
2638 di->txp[old] = NULL;
2639 }
2640
2641 /* update txin and txout */
2642 di->txin = ad;
2643 di->txout = TXD(di->txout + rot);
2644 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2645
2646 /* kick the chip */
2647 W_REG(di->osh, &di->d64txregs->ptr,
2648 di->xmtptrbase + I2B(di->txout, dma64dd_t));
2649 }
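
/*
 * Illustrative sketch only, not part of the original driver (hypothetical
 * HNDDMA_USAGE_EXAMPLES guard and example_* name): dma64_txrotate() asserts
 * that the engine is suspended and idle, so one plausible calling sequence
 * is suspend, wait for the suspended-idle state, rotate, then resume.
 */
#ifdef HNDDMA_USAGE_EXAMPLES
static void example_dma64_rotate(dma_info_t *di)
{
	dma64_txsuspend(di);

	/* wait (bounded) for the engine to report suspended and idle */
	SPINWAIT(!dma64_txsuspendedidle(di), 10000);

	if (dma64_txsuspendedidle(di)) {
		dma64_txrotate(di);
	} else {
		DMA_ERROR(("%s: %s: tx not suspended/idle, skipping rotate\n",
			   di->name, __func__));
	}

	dma64_txresume(di);
}
#endif /* HNDDMA_USAGE_EXAMPLES */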
2650
2651 uint dma_addrwidth(si_t *sih, void *dmaregs)
2652 {
2653 dma32regs_t *dma32regs;
2654 osl_t *osh;
2655
2656 osh = si_osh(sih);
2657
2658 	/* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
2659 /* DMA engine is 64-bit capable */
2660 if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
2661 		/* backplane is 64-bit capable */
2662 if (si_backplane64(sih))
2663 /* If bus is System Backplane or PCIE then we can access 64-bits */
2664 if ((BUSTYPE(sih->bustype) == SI_BUS) ||
2665 ((BUSTYPE(sih->bustype) == PCI_BUS) &&
2666 (sih->buscoretype == PCIE_CORE_ID)))
2667 return DMADDRWIDTH_64;
2668
2669 /* DMA64 is always 32-bit capable, AE is always TRUE */
2670 ASSERT(_dma64_addrext(osh, (dma64regs_t *) dmaregs));
2671
2672 return DMADDRWIDTH_32;
2673 }
2674
2675 /* Start checking for 32-bit / 30-bit addressing */
2676 dma32regs = (dma32regs_t *) dmaregs;
2677
2678 /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
2679 if ((BUSTYPE(sih->bustype) == SI_BUS) ||
2680 ((BUSTYPE(sih->bustype) == PCI_BUS)
2681 && sih->buscoretype == PCIE_CORE_ID)
2682 || (_dma32_addrext(osh, dma32regs)))
2683 return DMADDRWIDTH_32;
2684
2685 /* Fallthru */
2686 return DMADDRWIDTH_30;
2687 }
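
/*
 * Illustrative sketch only, not part of the original driver (hypothetical
 * HNDDMA_USAGE_EXAMPLES guard and example_* name): dma_addrwidth() condenses
 * the engine, backplane and bus checks above into a single width value, so a
 * caller can branch on it before deciding how to program descriptor and data
 * addresses.
 */
#ifdef HNDDMA_USAGE_EXAMPLES
static bool example_dma_is_64bit(si_t *sih, void *dmaregs)
{
	/* only DMADDRWIDTH_64 engines can be handed full 64-bit addresses */
	return dma_addrwidth(sih, dmaregs) == DMADDRWIDTH_64;
}
#endif /* HNDDMA_USAGE_EXAMPLES */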