/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Implementation of the receive path.
 */

#include "core.h"

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 *
 * NOTE: Caller should hold the rxbuf lock.
 */

static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0;	/* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* XXX For RADAR?
	 * virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ASSERT(skb != NULL);
	ds->ds_vdata = skb->data;

	/* setup rx descriptors */
	ath9k_hw_setuprxdesc(ah,
			     ds,
			     skb_tailroom(skb), /* buffer size */
			     0);

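	/*
	 * Chain this buffer into the hardware's rx list: sc_rxlink caches
	 * the address of the previous descriptor's link field. For the
	 * first buffer, hand the descriptor straight to the hardware;
	 * for every later one, patch the previous link field, then
	 * remember our own link field for the next caller.
	 */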
	if (sc->sc_rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->sc_rxlink = bf->bf_daddr;

	sc->sc_rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

/* Process received BAR frame */

static int ath_bar_rx(struct ath_softc *sc,
		      struct ath_node *an,
		      struct sk_buff *skb)
{
	struct ieee80211_bar *bar;
	struct ath_arx_tid *rxtid;
	struct sk_buff *tskb;
	struct ath_recv_status *rx_status;
	int tidno, index, cindex;
	u16 seqno;

	/* look at BAR contents */

	bar = (struct ieee80211_bar *)skb->data;
	tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
		>> IEEE80211_BAR_CTL_TID_S;
	seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;

	/* process BAR - indicate all pending RX frames up to the BAR seqno */

	rxtid = &an->an_aggr.rx.tid[tidno];

	spin_lock_bh(&rxtid->tidlock);

	/* get relative index */

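	/*
	 * ATH_BA_INDEX (defined in core.h) computes the distance from the
	 * window start to a sequence number, modulo the 4096-entry sequence
	 * space (presumably ((seq - start) & (IEEE80211_SEQ_MAX - 1))).
	 * E.g. with seq_next = 4090 and seqno = 5, the index is 11, so a
	 * frame that wrapped past 4095 still lands a small distance ahead.
	 * A *stale* sequence number just behind seq_next wraps the other
	 * way and yields a huge index, which the old-sequence check below
	 * rejects.
	 */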
	index = ATH_BA_INDEX(rxtid->seq_next, seqno);

	/* drop BAR if old sequence (index is too large) */

	if ((index > rxtid->baw_size) &&
	    (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
		/* discard frame, ieee layer may not treat frame as a dup */
		goto unlock_and_free;

	/* complete receive processing for all pending frames up to BAR seqno */

	cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	while ((rxtid->baw_head != rxtid->baw_tail) &&
	       (rxtid->baw_head != cindex)) {
		tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
		rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
		rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

		if (tskb != NULL)
			ath_rx_subframe(an, tskb, rx_status);

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/* ... and indicate the rest of the frames in-order */

	while (rxtid->baw_head != rxtid->baw_tail &&
	       rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
		tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
		rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
		rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

		ath_rx_subframe(an, tskb, rx_status);

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

unlock_and_free:
	spin_unlock_bh(&rxtid->tidlock);
	/* free the BAR itself */
	dev_kfree_skb(skb);
	return IEEE80211_FTYPE_CTL;
}

/* Handle a subframe of an aggregate when HT is enabled */

static int ath_ampdu_input(struct ath_softc *sc,
			   struct ath_node *an,
			   struct sk_buff *skb,
			   struct ath_recv_status *rx_status)
{
	struct ieee80211_hdr *hdr;
	struct ath_arx_tid *rxtid;
	struct ath_rxbuf *rxbuf;
	u8 type, subtype;
	u16 rxseq;
	int tid = 0, index, cindex, rxdiff;
	__le16 fc;
	u8 *qc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* drop frames with a non-zero version field */

	if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
		dev_kfree_skb(skb);
		return -1;
	}

	type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
	subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;

	if (ieee80211_is_back_req(fc))
		return ath_bar_rx(sc, an, skb);

	/* special aggregate processing only for QoS unicast data frames */

	if (!ieee80211_is_data(fc) ||
	    !ieee80211_is_data_qos(fc) ||
	    is_multicast_ether_addr(hdr->addr1))
		return ath_rx_subframe(an, skb, rx_status);

	/* lookup rx tid state */

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
		/* Drop frames not addressed to us. */
		if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
			dev_kfree_skb(skb);
			return -1;
		}
	}

	rxtid = &an->an_aggr.rx.tid[tid];

	spin_lock(&rxtid->tidlock);

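	/*
	 * rxdiff is the current occupancy of the reorder window: the
	 * distance from head to tail in the circular rxbuf array. It is
	 * used below to decide whether a newly stored frame extends the
	 * tail of the window.
	 */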
	rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
		(ATH_TID_MAX_BUFS - 1);

	/*
	 * If the ADDBA exchange has not been completed by the source,
	 * process via legacy path (i.e. no reordering buffer is needed)
	 */
	if (!rxtid->addba_exchangecomplete) {
		spin_unlock(&rxtid->tidlock);
		return ath_rx_subframe(an, skb, rx_status);
	}

	/* extract sequence number from received frame */

	rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;

	if (rxtid->seq_reset) {
		rxtid->seq_reset = 0;
		rxtid->seq_next = rxseq;
	}

	index = ATH_BA_INDEX(rxtid->seq_next, rxseq);

	/* drop frame if old sequence (index is too large) */

	if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
		/* discard frame, ieee layer may not treat frame as a dup */
		spin_unlock(&rxtid->tidlock);
		dev_kfree_skb(skb);
		return IEEE80211_FTYPE_DATA;
	}

	/* sequence number is beyond block-ack window */

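	/*
	 * Slide the window forward until the new frame fits inside it:
	 * each pass flushes the slot at baw_head (indicating the frame if
	 * one is pending) and advances both the window start (seq_next)
	 * and the ring head, decrementing index until index < baw_size.
	 */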
	if (index >= rxtid->baw_size) {

		/* complete receive processing for all pending frames */

		while (index >= rxtid->baw_size) {

			rxbuf = rxtid->rxbuf + rxtid->baw_head;

			if (rxbuf->rx_wbuf != NULL) {
				ath_rx_subframe(an, rxbuf->rx_wbuf,
						&rxbuf->rx_status);
				rxbuf->rx_wbuf = NULL;
			}

			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);

			index--;
		}
	}

	/* add buffer to the recv ba window */

	cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	rxbuf = rxtid->rxbuf + cindex;

	if (rxbuf->rx_wbuf != NULL) {
		spin_unlock(&rxtid->tidlock);
		/* duplicate frame */
		dev_kfree_skb(skb);
		return IEEE80211_FTYPE_DATA;
	}

	rxbuf->rx_wbuf = skb;
	rxbuf->rx_time = get_timestamp();
	rxbuf->rx_status = *rx_status;

	/* advance tail if sequence received is newer
	 * than any received so far */

	if (index >= rxdiff) {
		rxtid->baw_tail = cindex;
		INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
	}

	/* indicate all in-order received frames */

	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf)
			break;

		ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/*
	 * start a timer to flush all received frames if there are pending
	 * receive frames
	 */
	if (rxtid->baw_head != rxtid->baw_tail)
		mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
	else
		del_timer_sync(&rxtid->timer);

	spin_unlock(&rxtid->tidlock);
	return IEEE80211_FTYPE_DATA;
}

/* Timer to flush all received sub-frames */

static void ath_rx_timer(unsigned long data)
{
	struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
	struct ath_node *an = rxtid->an;
	struct ath_rxbuf *rxbuf;
	int nosched;

	spin_lock_bh(&rxtid->tidlock);
	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf) {
			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
			continue;
		}

		/*
		 * Stop if the next one is a very recent frame.
		 *
		 * Call get_timestamp in every iteration to protect against the
		 * case in which a new frame is received while we are executing
		 * this function. Using a timestamp obtained before entering
		 * the loop could lead to a very large time interval
		 * (a negative value typecast to unsigned), breaking the
		 * function's logic.
		 */
		if ((get_timestamp() - rxbuf->rx_time) <
			(ATH_RX_TIMEOUT * HZ / 1000))
			break;

		ath_rx_subframe(an, rxbuf->rx_wbuf,
				&rxbuf->rx_status);
		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/*
	 * Determine whether the timer needs to be re-armed: frames are
	 * still pending if head has not caught up with tail.
	 */
	if (rxtid->baw_head != rxtid->baw_tail)
		nosched = 0;
	else
		nosched = 1;	/* no need to re-arm the timer again */

	spin_unlock_bh(&rxtid->tidlock);
}

/* Free all pending sub-frames in the re-ordering buffer */

static void ath_rx_flush_tid(struct ath_softc *sc,
			     struct ath_arx_tid *rxtid, int drop)
{
	struct ath_rxbuf *rxbuf;

	spin_lock_bh(&rxtid->tidlock);
	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf) {
			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
			continue;
		}

		if (drop)
			dev_kfree_skb(rxbuf->rx_wbuf);
		else
			ath_rx_subframe(rxtid->an,
					rxbuf->rx_wbuf,
					&rxbuf->rx_status);

		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}
	spin_unlock_bh(&rxtid->tidlock);
}

static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
				       u32 len)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */

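	/*
	 * Over-allocate by (cachelsz - 1) bytes so that whatever offset
	 * skb->data starts at, reserving (cachelsz - off) bytes still
	 * leaves at least len bytes of aligned buffer. E.g. with a 32-byte
	 * cache line and data starting 20 bytes into a line, 12 bytes are
	 * reserved to reach the next line boundary.
	 */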
	skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % sc->sc_cachelsz;
		if (off != 0)
			skb_reserve(skb, sc->sc_cachelsz - off);
	} else {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: skbuff alloc of size %u failed\n",
			__func__, len);
		return NULL;
	}

	return skb;
}

static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;

	ASSERT(bf != NULL);

	spin_lock_bh(&sc->sc_rxbuflock);
	if (bf->bf_status & ATH_BUFSTATUS_STALE) {
		/*
		 * This buffer is still held for hw access.
		 * Mark it free so it can be re-queued later.
		 */
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} else {
		/* XXX: we probably never enter here, remove after
		 * verification */
		list_add_tail(&bf->list, &sc->sc_rxbuf);
		ath_rx_buf_link(sc, bf);
	}
	spin_unlock_bh(&sc->sc_rxbuflock);
}

/*
 * The skb indicated to the upper stack won't be returned to us.
 * So we have to allocate a new one and queue it by ourselves.
 */
static int ath_rx_indicate(struct ath_softc *sc,
			   struct sk_buff *skb,
			   struct ath_recv_status *status,
			   u16 keyix)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
	struct sk_buff *nskb;
	int type;

	/* indicate frame to the stack, which will free the old skb. */
	type = ath__rx_indicate(sc, skb, status, keyix);

	/* allocate a new skb and queue it for H/W processing */
	nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
	if (nskb != NULL) {
		bf->bf_mpdu = nskb;
		bf->bf_buf_addr = ath_skb_map_single(sc,
			nskb,
			PCI_DMA_FROMDEVICE,
			/* XXX: Remove get_dma_mem_context() */
			get_dma_mem_context(bf, bf_dmacontext));
		ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;

		/* queue the new wbuf to H/W */
		ath_rx_requeue(sc, nskb);
	}

	return type;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_myaddr);

	/* calculate and install multicast filter */
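	/*
	 * Setting both halves to ~0 accepts all multicast frames; the two
	 * u32 words presumably form the hardware's 64-bit multicast hash
	 * filter.
	 */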
	mfilt[0] = mfilt[1] = ~0;

	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	do {
		spin_lock_init(&sc->sc_rxflushlock);
		sc->sc_rxflush = 0;
		spin_lock_init(&sc->sc_rxbuflock);

		/*
		 * Cisco's VPN software requires that drivers be able to
		 * receive encapsulated frames that are larger than the MTU.
		 * Since we can't be sure how large a frame we'll get, set up
		 * to handle the largest frame possible.
		 */
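		/*
		 * Round the buffer size up to a multiple of the cache line
		 * size (capped at 64 bytes), presumably so the DMA buffer
		 * stays cache-line aligned end to end, matching the
		 * alignment done in ath_rxbuf_alloc().
		 */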
		sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					   min(sc->sc_cachelsz,
					       (u16)64));

		DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
			__func__, sc->sc_cachelsz, sc->sc_rxbufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
					  "rx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate rx descriptors: %d\n",
				__func__, error);
			break;
		}

		/* Pre-allocate a wbuf for each rx buffer */

		list_for_each_entry(bf, &sc->sc_rxbuf, list) {
			skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
			if (skb == NULL) {
				error = -ENOMEM;
				break;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr =
				ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE,
					get_dma_mem_context(bf, bf_dmacontext));
			ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
		}
		sc->sc_rxlink = NULL;

	} while (0);

	if (error)
		ath_rx_cleanup(sc);

	return error;
}

/* Reclaim all rx queue resources */

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->sc_rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb)
			dev_kfree_skb(skb);
	}

	/* cleanup rx descriptors */

	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/* Can't set HOSTAP into promiscuous mode */
	if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
	     (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
		rfilt |= ATH9K_RX_FILTER_PROM;
		/* ??? To prevent the hardware from sending ACKs */
		rfilt &= ~ATH9K_RX_FILTER_UCAST;
	}

	if (((sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	     (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS))
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/* If in HOSTAP mode, want to enable reception of PSPOLL frames
	   & beacon frames */
	if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
		rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
	return rfilt;

#undef RX_FILTER_PRESERVE
}

/* Enable the receive h/w following a reset. */

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->sc_rxbuflock);
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	sc->sc_rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			/* restarting h/w, no need for holding descriptors */
			bf->bf_status &= ~ATH_BUFSTATUS_STALE;
			/*
			 * Upper layer may not be done with the frame yet so
			 * we can't just re-queue it to hardware. Remove it
			 * from h/w queue. It'll be re-queued when upper layer
			 * returns the frame and ath_rx_requeue_mpdu is called.
			 */
			if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
				list_del(&bf->list);
				continue;
			}
		}
		/* chain descriptors */
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);	/* enable recv descriptors */

start_recv:
	spin_unlock_bh(&sc->sc_rxbuflock);
	ath_opmode_init(sc);	/* set filters, etc. */
	ath9k_hw_startpcureceive(ah);	/* re-enable PCU/DMA engine */
	return 0;
}

/* Disable the receive h/w in preparation for a reset. */

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u64 tsf;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);	/* disable PCU */
	ath9k_hw_setrxfilter(ah, 0);	/* clear recv filter */
	stopped = ath9k_hw_stopdmarecv(ah);	/* disable DMA engine */
	mdelay(3);	/* 3ms is long enough for 1 frame */
	tsf = ath9k_hw_gettsf64(ah);
	sc->sc_rxlink = NULL;	/* just in case */
	return stopped;
}

/* Flush receive queue */

void ath_flushrecv(struct ath_softc *sc)
{
	/*
	 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
	 * queue at the same time. Use a lock to serialize the access of rx
	 * queue.
	 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
	 * Instead, do not claim the spinlock but check for a flush in
	 * progress (see references to sc_rxflush)
	 */
	spin_lock_bh(&sc->sc_rxflushlock);
	sc->sc_rxflush = 1;

	ath_rx_tasklet(sc, 1);

	sc->sc_rxflush = 0;
	spin_unlock_bh(&sc->sc_rxflushlock);
}

/* Process an individual frame */

int ath_rx_input(struct ath_softc *sc,
		 struct ath_node *an,
		 int is_ampdu,
		 struct sk_buff *skb,
		 struct ath_recv_status *rx_status,
		 enum ATH_RX_TYPE *status)
{
	if (is_ampdu && (sc->sc_flags & SC_OP_RXAGGR)) {
		*status = ATH_RX_CONSUMED;
		return ath_ampdu_input(sc, an, skb, rx_status);
	} else {
		*status = ATH_RX_NON_CONSUMED;
		return -1;
	}
}

/* Process receive queue, as well as LED, etc. */

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)						\
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +	\
			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
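/*
 * PA2DESC maps a descriptor's DMA (physical) address back to its virtual
 * address: the rx descriptors live in one contiguous block, so the offset
 * of _pa from the block's physical base locates the same descriptor
 * relative to the block's virtual base.
 */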

	struct ath_buf *bf, *bf_held = NULL;
	struct ath_desc *ds;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct ath_recv_status rx_status;
	struct ath_hal *ah = sc->sc_ah;
	int type, rx_processed = 0;
	u32 phyerr;
	u8 chainreset = 0;
	int retval;
	__le16 fc;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if (sc->sc_rxflush && (flush == 0))
			break;

		spin_lock_bh(&sc->sc_rxbuflock);
		if (list_empty(&sc->sc_rxbuf)) {
			sc->sc_rxlink = NULL;
			spin_unlock_bh(&sc->sc_rxbuflock);
			break;
		}

		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);

		/*
		 * There is a race condition that the BH gets scheduled after
		 * sw writes RxE and before hw re-loads the last descriptor
		 * to get the newly chained one. Software must keep the last
		 * DONE descriptor as a holding descriptor - software does so
		 * by marking it with the STALE flag.
		 */
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
				/*
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to
				 * remove the last holding descriptor
				 * in BH context.
				 */
				list_del(&bf_held->list);
				bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
				sc->sc_rxlink = NULL;

				if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
					list_add_tail(&bf_held->list,
						&sc->sc_rxbuf);
					ath_rx_buf_link(sc, bf_held);
				}
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
			bf = list_entry(bf->list.next, struct ath_buf, list);
		}

		ds = bf->bf_desc;
		++rx_processed;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah,
					     ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */

			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah,
				tds, tbf->bf_daddr,
				PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
		}

		/* XXX: we do not support frames spanning
		 * multiple descriptors */
		bf->bf_status |= ATH_BUFSTATUS_DONE;

		skb = bf->bf_mpdu;
		if (skb == NULL) {	/* XXX ??? can this happen */
			spin_unlock_bh(&sc->sc_rxbuflock);
			continue;
		}
		/*
		 * Now we know it's a completed frame, we can indicate the
		 * frame. Remove the previous holding descriptor and leave
		 * this one in the queue as the new holding descriptor.
		 */
		if (bf_held) {
			list_del(&bf_held->list);
			bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
			if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
				list_add_tail(&bf_held->list, &sc->sc_rxbuf);
				/* try to requeue this descriptor */
				ath_rx_buf_link(sc, bf_held);
			}
		}

		bf->bf_status |= ATH_BUFSTATUS_STALE;
		bf_held = bf;
		/*
		 * Release the lock here in case ieee80211_input() returns
		 * the frame immediately by calling ath_rx_mpdu_requeue().
		 */
		spin_unlock_bh(&sc->sc_rxbuflock);

		if (flush) {
			/*
			 * If we're asked to flush receive queue, directly
			 * chain it back at the queue without processing it.
			 */
			goto rx_next;
		}

		hdr = (struct ieee80211_hdr *)skb->data;
		fc = hdr->frame_control;
		memzero(&rx_status, sizeof(struct ath_recv_status));

		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms. If not in monitor mode,
			 * discard the frame.
			 */
#ifndef ERROR_FRAMES
			/*
			 * Enable this if you want to see
			 * error frames in Monitor mode.
			 */
			if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
				goto rx_next;
#endif
			/* fall thru for monitor mode handling... */
		} else if (ds->ds_rxstat.rs_status != 0) {
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
				rx_status.flags |= ATH_RX_FCS_ERROR;
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				goto rx_next;
			}

			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. We only mark packet status
				 * here and always push the frame up to let
				 * mac80211 handle the actual error case, be
				 * it no decryption key or real decryption
				 * error. This lets us keep statistics there.
				 */
				rx_status.flags |= ATH_RX_DECRYPT_ERROR;
			} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
				/*
				 * Demic error. We only mark frame status here
				 * and always push the frame up to let
				 * mac80211 handle the actual error case. This
				 * lets us keep statistics there. Hardware may
				 * post a false-positive MIC error.
				 */
				if (ieee80211_is_ctl(fc))
					/*
					 * Sometimes, we get invalid
					 * MIC failures on valid control frames.
					 * Remove these mic errors.
					 */
					ds->ds_rxstat.rs_status &=
						~ATH9K_RXERR_MIC;
				else
					rx_status.flags |= ATH_RX_MIC_ERROR;
			}
			/*
			 * Reject error frames with the exception of
			 * decryption and MIC failures. For monitor mode,
			 * we also ignore the CRC error.
			 */
			if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
				      ATH9K_RXERR_CRC))
					goto rx_next;
			} else {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
					goto rx_next;
				}
			}
		}
		/*
		 * The status portion of the descriptor could get corrupted.
		 */
		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
			goto rx_next;
		/*
		 * Sync and unmap the frame. At this point we're
		 * committed to passing the sk_buff somewhere so
		 * clear buf_skb; this means a new sk_buff must be
		 * allocated when the rx descriptor is setup again
		 * to receive another frame.
		 */
		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);
		rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
		rx_status.rateieee =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
		rx_status.rateKbps =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
		rx_status.ratecode = ds->ds_rxstat.rs_rate;

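		/*
		 * Adjust the base (20 MHz, long-GI) rate for HT frames.
		 * The 27/13 factor presumably reflects the 40/20 MHz
		 * data-subcarrier ratio (108/52), and 10/9 the long/short
		 * guard interval symbol-time ratio (4.0us/3.6us).
		 */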
		/* HT rate */
		if (rx_status.ratecode & 0x80) {
			/* TODO - add table to avoid division */
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
				rx_status.flags |= ATH_RX_40MHZ;
				rx_status.rateKbps =
					(rx_status.rateKbps * 27) / 13;
			}
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
				rx_status.rateKbps =
					(rx_status.rateKbps * 10) / 9;
			else
				rx_status.flags |= ATH_RX_SHORT_GI;
		}

		/* sc->sc_noise_floor is only available when the station
		   attaches to an AP, so we use a default value
		   if we are not yet attached. */

		/* XXX we should use either sc->sc_noise_floor or
		 * ath_hal_getChanNoise(ah, &sc->sc_curchan)
		 * to calculate the noise floor.
		 * However, the value returned by ath_hal_getChanNoise
		 * seems to be incorrect (-31dBm on the last test),
		 * so we will use a hard-coded value until we
		 * figure out what is going on.
		 */
		rx_status.abs_rssi =
			ds->ds_rxstat.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;

		pci_dma_sync_single_for_cpu(sc->pdev,
					    bf->bf_buf_addr,
					    skb_tailroom(skb),
					    PCI_DMA_FROMDEVICE);
		pci_unmap_single(sc->pdev,
				 bf->bf_buf_addr,
				 sc->sc_rxbufsize,
				 PCI_DMA_FROMDEVICE);

		/* XXX: Ah! make me more readable, use a helper */
		if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
			if (ds->ds_rxstat.rs_moreaggr == 0) {
				rx_status.rssictl[0] =
					ds->ds_rxstat.rs_rssi_ctl0;
				rx_status.rssictl[1] =
					ds->ds_rxstat.rs_rssi_ctl1;
				rx_status.rssictl[2] =
					ds->ds_rxstat.rs_rssi_ctl2;
				rx_status.rssi = ds->ds_rxstat.rs_rssi;
				if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
					rx_status.rssiextn[0] =
						ds->ds_rxstat.rs_rssi_ext0;
					rx_status.rssiextn[1] =
						ds->ds_rxstat.rs_rssi_ext1;
					rx_status.rssiextn[2] =
						ds->ds_rxstat.rs_rssi_ext2;
					rx_status.flags |=
						ATH_RX_RSSI_EXTN_VALID;
				}
				rx_status.flags |= ATH_RX_RSSI_VALID |
					ATH_RX_CHAIN_RSSI_VALID;
			}
		} else {
			/*
			 * Need to insert the "combined" rssi into the
			 * status structure for upper layer processing
			 */
			rx_status.rssi = ds->ds_rxstat.rs_rssi;
			rx_status.flags |= ATH_RX_RSSI_VALID;
		}

		/* Pass frames up to the stack. */

		type = ath_rx_indicate(sc, skb,
			&rx_status, ds->ds_rxstat.rs_keyix);

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc,
					ds->ds_rxstat.rs_antenna);
		} else {
			sc->sc_rxotherant = 0;
		}

#ifdef CONFIG_SLOW_ANT_DIV
		if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
		    ieee80211_is_beacon(fc)) {
			ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
		}
#endif
		/*
		 * For frames successfully indicated, the buffer will be
		 * returned to us by upper layers by calling
		 * ath_rx_mpdu_requeue, either synchronously or asynchronously.
		 * So we don't want to do it here in this loop.
		 */
		continue;

rx_next:
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} while (TRUE);

	if (chainreset) {
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Reset rx chain mask. "
			"Do internal reset\n", __func__);
		ASSERT(flush == 0);
		ath_reset(sc, false);
	}

	return 0;
#undef PA2DESC
}

/* Process ADDBA request in per-TID data structure */

int ath_rx_aggr_start(struct ath_softc *sc,
		      const u8 *addr,
		      u16 tid,
		      u16 *ssn)
{
	struct ath_arx_tid *rxtid;
	struct ath_node *an;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_supported_band *sband;
	u16 buffersize = 0;

	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, (u8 *) addr);
	spin_unlock_bh(&sc->node_lock);

	if (!an) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Node not found to initialize RX aggregation\n",
			__func__);
		return -1;
	}

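	/*
	 * Derive the peer's maximum reorder buffer from its advertised
	 * A-MPDU factor; IEEE80211_MIN_AMPDU_BUF is presumably the 8-frame
	 * baseline, scaled up by the exponent from the HT capabilities.
	 */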
	sband = hw->wiphy->bands[hw->conf.channel->band];
	buffersize = IEEE80211_MIN_AMPDU_BUF <<
		sband->ht_info.ampdu_factor;	/* FIXME */

	rxtid = &an->an_aggr.rx.tid[tid];

	spin_lock_bh(&rxtid->tidlock);
	if (sc->sc_flags & SC_OP_RXAGGR) {
		/* Allow aggregation reception
		 * Adjust rx BA window size. Peer might indicate a
		 * zero buffer size for a _dont_care_ condition.
		 */
		if (buffersize)
			rxtid->baw_size = min(buffersize, rxtid->baw_size);

		/* set rx sequence number */
		rxtid->seq_next = *ssn;

		/* Allocate the receive buffers for this TID */
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Allocating rxbuffer for TID %d\n", __func__, tid);

		if (rxtid->rxbuf == NULL) {
			/*
			 * If the rxbuf is not NULL at this point, we *probably*
			 * already allocated the buffer on a previous ADDBA,
			 * and this is a subsequent ADDBA that got through.
			 * Don't allocate, but use the value in the pointer,
			 * we zero it out when we de-allocate.
			 */
			rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
				sizeof(struct ath_rxbuf), GFP_ATOMIC);
		}
		if (rxtid->rxbuf == NULL) {
			DPRINTF(sc, ATH_DBG_AGGR,
				"%s: Unable to allocate RX buffer, "
				"refusing ADDBA\n", __func__);
		} else {
			/* Ensure the memory is zeroed out (all internal
			 * pointers are null) */
			memzero(rxtid->rxbuf, ATH_TID_MAX_BUFS *
				sizeof(struct ath_rxbuf));
			DPRINTF(sc, ATH_DBG_AGGR,
				"%s: Allocated @%p\n", __func__, rxtid->rxbuf);

			/* Allow aggregation reception */
			rxtid->addba_exchangecomplete = 1;
		}
	}
	spin_unlock_bh(&rxtid->tidlock);

	return 0;
}

/* Process DELBA */

int ath_rx_aggr_stop(struct ath_softc *sc,
		     const u8 *addr,
		     u16 tid)
{
	struct ath_node *an;

	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, (u8 *) addr);
	spin_unlock_bh(&sc->node_lock);

	if (!an) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: RX aggr stop for non-existent node\n", __func__);
		return -1;
	}

	ath_rx_aggr_teardown(sc, an, tid);
	return 0;
}

/* Rx aggregation tear down */

void ath_rx_aggr_teardown(struct ath_softc *sc,
			  struct ath_node *an, u8 tid)
{
	struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];

	if (!rxtid->addba_exchangecomplete)
		return;

	del_timer_sync(&rxtid->timer);
	ath_rx_flush_tid(sc, rxtid, 0);
	rxtid->addba_exchangecomplete = 0;

	/* De-allocate the receive buffer array allocated when addba started */

	if (rxtid->rxbuf) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Deallocating TID %d rxbuff @%p\n",
			__func__, tid, rxtid->rxbuf);
		kfree(rxtid->rxbuf);

		/* Set pointer to null to avoid reuse */
		rxtid->rxbuf = NULL;
	}
}

/* Initialize per-node receive state */

void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	if (sc->sc_flags & SC_OP_RXAGGR) {
		struct ath_arx_tid *rxtid;
		int tidno;

		/* Init per tid rx state */
		for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
		     tidno < WME_NUM_TID;
		     tidno++, rxtid++) {
			rxtid->an = an;
			rxtid->seq_reset = 1;
			rxtid->seq_next = 0;
			rxtid->baw_size = WME_MAX_BA;
			rxtid->baw_head = rxtid->baw_tail = 0;

			/*
			 * Ensure the buffer pointer is null at this point
			 * (needs to be allocated when addba is received)
			 */

			rxtid->rxbuf = NULL;
			setup_timer(&rxtid->timer, ath_rx_timer,
				    (unsigned long)rxtid);
			spin_lock_init(&rxtid->tidlock);

			/* ADDBA state */
			rxtid->addba_exchangecomplete = 0;
		}
	}
}

void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	if (sc->sc_flags & SC_OP_RXAGGR) {
		struct ath_arx_tid *rxtid;
		int tidno, i;

		/* Clean up per tid rx state */
		for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
		     tidno < WME_NUM_TID;
		     tidno++, rxtid++) {

			if (!rxtid->addba_exchangecomplete)
				continue;

			/* must cancel timer first */
			del_timer_sync(&rxtid->timer);

			/* drop any pending sub-frames */
			ath_rx_flush_tid(sc, rxtid, 1);

			for (i = 0; i < ATH_TID_MAX_BUFS; i++)
				ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);

			rxtid->addba_exchangecomplete = 0;
		}
	}
}

/* Cleanup per-node receive state */

void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
{
	ath_rx_node_cleanup(sc, an);
}

dma_addr_t ath_skb_map_single(struct ath_softc *sc,
			      struct sk_buff *skb,
			      int direction,
			      dma_addr_t *pa)
{
	/*
	 * NB: do NOT use skb->len, which is 0 on initialization.
	 * Use skb's entire data area instead.
	 */
	*pa = pci_map_single(sc->pdev, skb->data,
			     skb_end_pointer(skb) - skb->head, direction);
	return *pa;
}

void ath_skb_unmap_single(struct ath_softc *sc,
			  struct sk_buff *skb,
			  int direction,
			  dma_addr_t *pa)
{
	/* Unmap skb's entire data area */
	pci_unmap_single(sc->pdev, *pa,
			 skb_end_pointer(skb) - skb->head, direction);
}