/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		(alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
}

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->is_monitoring))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) ||
	    AR_SREV_9285_12_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped;
}

void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return; /* not from our current AP */
	}

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, ATH_DBG_PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, ATH_DBG_PS,
			"PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, ATH_DBG_PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
#define is_mc_or_valid_tkip_keyix ((is_mc ||			\
		(rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&	\
		test_bit(rx_stats->rs_keyix, common->tkip_keymap))))

	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			bool is_mc;
			/*
			 * The MIC error bit is only valid if the frame
			 * is not a control frame or fragment, and it was
			 * decrypted using a valid TKIP key.
			 */
			is_mc = !!is_multicast_ether_addr(hdr->addr1);

			if (!ieee80211_is_ctl(fc) &&
			    !ieee80211_has_morefrags(fc) &&
			    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
			    is_mc_or_valid_tkip_keyix)
				rxs->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->is_monitoring) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ATH_DBG_XMIT,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);

	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	__le16 fc;

	if (ah->opmode != NL80211_IFTYPE_STATION)
		return;

	fc = hdr->frame_control;
	if (!ieee80211_is_beacon(fc) ||
	    compare_ether_addr(hdr->addr3, common->curbssid)) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return;
	}

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rx_stats->rs_rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}

static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case (0x10): /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x20): /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case (0x21): /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x12): /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x13): /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x23): /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}

static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximun ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt*/
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2*/
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}

static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
{
	/* Adjust the fast_div_bias based on main and alt lna conf */
	switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
	case (0x01): /* A-B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x02): /* A-B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	case (0x03): /* A-B A+B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x10): /* LNA2 A-B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x12): /* LNA2 LNA1 */
		ant_conf->fast_div_bias = 0x2;
		break;
	case (0x13): /* LNA2 A+B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x20): /* LNA1 A-B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x21): /* LNA1 LNA2 */
		ant_conf->fast_div_bias = 0x0;
		break;
	case (0x23): /* LNA1 A+B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x30): /* A+B A-B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x31): /* A+B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x32): /* A+B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	default:
		break;
	}
}

/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set, curr_bias;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		       ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			 ATH_ANT_RX_MASK;

	/* Record packet only when alt_rssi is positive */
	if (alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					    antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	    rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				 antcomb->total_pkt_count);
	}


	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;
	curr_bias = div_ant_conf.fast_div_bias;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf);

	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue_drop_frag;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue_drop_frag;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			sc->rx.frag = NULL;

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				goto requeue_drop_frag;
			}

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    unlikely(ath9k_check_auto_sleep(sc)))
			ath_rx_ps(sc, skb);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}