ath9k: remove rssi/antenna information from recv debug stats
[deliverable/linux.git] / drivers / net / wireless / ath / ath9k / recv.c
CommitLineData
f078f209 1/*
5b68138e 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
f078f209
LR
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
b7f080cf 17#include <linux/dma-mapping.h>
394cf0a1 18#include "ath9k.h"
b622a720 19#include "ar9003_mac.h"
f078f209 20
b5c80475
FF
21#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
22
102885a5
VT
23static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
24 int mindelta, int main_rssi_avg,
25 int alt_rssi_avg, int pkt_count)
26{
27 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
28 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
29 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
30}
31
b85c5734
MSS
32static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
33 int curr_main_set, int curr_alt_set,
34 int alt_rssi_avg, int main_rssi_avg)
35{
36 bool result = false;
37 switch (div_group) {
38 case 0:
39 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
40 result = true;
41 break;
42 case 1:
66ce235a 43 case 2:
b85c5734
MSS
44 if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
45 (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
46 (alt_rssi_avg >= (main_rssi_avg - 5))) ||
47 ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
48 (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
49 (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
50 (alt_rssi_avg >= 4))
51 result = true;
52 else
53 result = false;
54 break;
55 }
56
57 return result;
58}
59
ededf1f8
VT
60static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
61{
62 return sc->ps_enabled &&
63 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
64}
65
f078f209
LR
66/*
67 * Setup and link descriptors.
68 *
69 * 11N: we can no longer afford to self link the last descriptor.
70 * MAC acknowledges BA status as long as it copies frames to host
71 * buffer (or rx fifo). This can incorrectly acknowledge packets
72 * to a sender if last desc is self-linked.
f078f209 73 */
f078f209
LR
74static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
75{
cbe61d8a 76 struct ath_hw *ah = sc->sc_ah;
cc861f74 77 struct ath_common *common = ath9k_hw_common(ah);
f078f209
LR
78 struct ath_desc *ds;
79 struct sk_buff *skb;
80
81 ATH_RXBUF_RESET(bf);
82
83 ds = bf->bf_desc;
be0418ad 84 ds->ds_link = 0; /* link to null */
f078f209
LR
85 ds->ds_data = bf->bf_buf_addr;
86
be0418ad 87 /* virtual addr of the beginning of the buffer. */
f078f209 88 skb = bf->bf_mpdu;
9680e8a3 89 BUG_ON(skb == NULL);
f078f209
LR
90 ds->ds_vdata = skb->data;
91
cc861f74
LR
92 /*
93 * setup rx descriptors. The rx_bufsize here tells the hardware
b4b6cda2 94 * how much data it can DMA to us and that we are prepared
cc861f74
LR
95 * to process
96 */
b77f483f 97 ath9k_hw_setuprxdesc(ah, ds,
cc861f74 98 common->rx_bufsize,
f078f209
LR
99 0);
100
b77f483f 101 if (sc->rx.rxlink == NULL)
f078f209
LR
102 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
103 else
b77f483f 104 *sc->rx.rxlink = bf->bf_daddr;
f078f209 105
b77f483f 106 sc->rx.rxlink = &ds->ds_link;
f078f209
LR
107}
108
ff37e337
S
109static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
110{
111 /* XXX block beacon interrupts */
112 ath9k_hw_setantenna(sc->sc_ah, antenna);
b77f483f
S
113 sc->rx.defant = antenna;
114 sc->rx.rxotherant = 0;
ff37e337
S
115}
116
f078f209
LR
117static void ath_opmode_init(struct ath_softc *sc)
118{
cbe61d8a 119 struct ath_hw *ah = sc->sc_ah;
1510718d
LR
120 struct ath_common *common = ath9k_hw_common(ah);
121
f078f209
LR
122 u32 rfilt, mfilt[2];
123
124 /* configure rx filter */
125 rfilt = ath_calcrxfilter(sc);
126 ath9k_hw_setrxfilter(ah, rfilt);
127
128 /* configure bssid mask */
364734fa 129 ath_hw_setbssidmask(common);
f078f209
LR
130
131 /* configure operational mode */
132 ath9k_hw_setopmode(ah);
133
f078f209
LR
134 /* calculate and install multicast filter */
135 mfilt[0] = mfilt[1] = ~0;
f078f209 136 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
f078f209
LR
137}
138
b5c80475
FF
139static bool ath_rx_edma_buf_link(struct ath_softc *sc,
140 enum ath9k_rx_qtype qtype)
f078f209 141{
b5c80475
FF
142 struct ath_hw *ah = sc->sc_ah;
143 struct ath_rx_edma *rx_edma;
f078f209
LR
144 struct sk_buff *skb;
145 struct ath_buf *bf;
f078f209 146
b5c80475
FF
147 rx_edma = &sc->rx.rx_edma[qtype];
148 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
149 return false;
f078f209 150
b5c80475
FF
151 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
152 list_del_init(&bf->list);
f078f209 153
b5c80475
FF
154 skb = bf->bf_mpdu;
155
156 ATH_RXBUF_RESET(bf);
157 memset(skb->data, 0, ah->caps.rx_status_len);
158 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
159 ah->caps.rx_status_len, DMA_TO_DEVICE);
f078f209 160
b5c80475
FF
161 SKB_CB_ATHBUF(skb) = bf;
162 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
163 skb_queue_tail(&rx_edma->rx_fifo, skb);
f078f209 164
b5c80475
FF
165 return true;
166}
167
168static void ath_rx_addbuffer_edma(struct ath_softc *sc,
169 enum ath9k_rx_qtype qtype, int size)
170{
b5c80475 171 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
6a01f0c0 172 struct ath_buf *bf, *tbf;
b5c80475 173
b5c80475 174 if (list_empty(&sc->rx.rxbuf)) {
d2182b69 175 ath_dbg(common, QUEUE, "No free rx buf available\n");
b5c80475 176 return;
797fe5cb 177 }
f078f209 178
6a01f0c0 179 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
b5c80475
FF
180 if (!ath_rx_edma_buf_link(sc, qtype))
181 break;
182
b5c80475
FF
183}
184
185static void ath_rx_remove_buffer(struct ath_softc *sc,
186 enum ath9k_rx_qtype qtype)
187{
188 struct ath_buf *bf;
189 struct ath_rx_edma *rx_edma;
190 struct sk_buff *skb;
191
192 rx_edma = &sc->rx.rx_edma[qtype];
193
194 while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
195 bf = SKB_CB_ATHBUF(skb);
196 BUG_ON(!bf);
197 list_add_tail(&bf->list, &sc->rx.rxbuf);
198 }
199}
200
201static void ath_rx_edma_cleanup(struct ath_softc *sc)
202{
ba542385
MSS
203 struct ath_hw *ah = sc->sc_ah;
204 struct ath_common *common = ath9k_hw_common(ah);
b5c80475
FF
205 struct ath_buf *bf;
206
207 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
208 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
209
797fe5cb 210 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
ba542385
MSS
211 if (bf->bf_mpdu) {
212 dma_unmap_single(sc->dev, bf->bf_buf_addr,
213 common->rx_bufsize,
214 DMA_BIDIRECTIONAL);
b5c80475 215 dev_kfree_skb_any(bf->bf_mpdu);
ba542385
MSS
216 bf->bf_buf_addr = 0;
217 bf->bf_mpdu = NULL;
218 }
b5c80475
FF
219 }
220
221 INIT_LIST_HEAD(&sc->rx.rxbuf);
222
223 kfree(sc->rx.rx_bufptr);
224 sc->rx.rx_bufptr = NULL;
225}
226
227static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
228{
229 skb_queue_head_init(&rx_edma->rx_fifo);
230 skb_queue_head_init(&rx_edma->rx_buffers);
231 rx_edma->rx_fifo_hwsize = size;
232}
233
234static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
235{
236 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
237 struct ath_hw *ah = sc->sc_ah;
238 struct sk_buff *skb;
239 struct ath_buf *bf;
240 int error = 0, i;
241 u32 size;
242
b5c80475
FF
243 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
244 ah->caps.rx_status_len);
245
246 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
247 ah->caps.rx_lp_qdepth);
248 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
249 ah->caps.rx_hp_qdepth);
250
251 size = sizeof(struct ath_buf) * nbufs;
252 bf = kzalloc(size, GFP_KERNEL);
253 if (!bf)
254 return -ENOMEM;
255
256 INIT_LIST_HEAD(&sc->rx.rxbuf);
257 sc->rx.rx_bufptr = bf;
258
259 for (i = 0; i < nbufs; i++, bf++) {
cc861f74 260 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
b5c80475 261 if (!skb) {
797fe5cb 262 error = -ENOMEM;
b5c80475 263 goto rx_init_fail;
f078f209 264 }
f078f209 265
b5c80475 266 memset(skb->data, 0, common->rx_bufsize);
797fe5cb 267 bf->bf_mpdu = skb;
b5c80475 268
797fe5cb 269 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
cc861f74 270 common->rx_bufsize,
b5c80475 271 DMA_BIDIRECTIONAL);
797fe5cb 272 if (unlikely(dma_mapping_error(sc->dev,
b5c80475
FF
273 bf->bf_buf_addr))) {
274 dev_kfree_skb_any(skb);
275 bf->bf_mpdu = NULL;
6cf9e995 276 bf->bf_buf_addr = 0;
3800276a 277 ath_err(common,
b5c80475
FF
278 "dma_mapping_error() on RX init\n");
279 error = -ENOMEM;
280 goto rx_init_fail;
281 }
282
283 list_add_tail(&bf->list, &sc->rx.rxbuf);
284 }
285
286 return 0;
287
288rx_init_fail:
289 ath_rx_edma_cleanup(sc);
290 return error;
291}
292
293static void ath_edma_start_recv(struct ath_softc *sc)
294{
295 spin_lock_bh(&sc->rx.rxbuflock);
296
297 ath9k_hw_rxena(sc->sc_ah);
298
299 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
300 sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
301
302 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
303 sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
304
b5c80475
FF
305 ath_opmode_init(sc);
306
48a6a468 307 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
7583c550
LR
308
309 spin_unlock_bh(&sc->rx.rxbuflock);
b5c80475
FF
310}
311
312static void ath_edma_stop_recv(struct ath_softc *sc)
313{
b5c80475
FF
314 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
315 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
b5c80475
FF
316}
317
318int ath_rx_init(struct ath_softc *sc, int nbufs)
319{
320 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
321 struct sk_buff *skb;
322 struct ath_buf *bf;
323 int error = 0;
324
4bdd1e97 325 spin_lock_init(&sc->sc_pcu_lock);
b5c80475
FF
326 sc->sc_flags &= ~SC_OP_RXFLUSH;
327 spin_lock_init(&sc->rx.rxbuflock);
328
0d95521e
FF
329 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
330 sc->sc_ah->caps.rx_status_len;
331
b5c80475
FF
332 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
333 return ath_rx_edma_init(sc, nbufs);
334 } else {
d2182b69 335 ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
226afe68 336 common->cachelsz, common->rx_bufsize);
b5c80475
FF
337
338 /* Initialize rx descriptors */
339
340 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
4adfcded 341 "rx", nbufs, 1, 0);
b5c80475 342 if (error != 0) {
3800276a
JP
343 ath_err(common,
344 "failed to allocate rx descriptors: %d\n",
345 error);
797fe5cb
S
346 goto err;
347 }
b5c80475
FF
348
349 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
350 skb = ath_rxbuf_alloc(common, common->rx_bufsize,
351 GFP_KERNEL);
352 if (skb == NULL) {
353 error = -ENOMEM;
354 goto err;
355 }
356
357 bf->bf_mpdu = skb;
358 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
359 common->rx_bufsize,
360 DMA_FROM_DEVICE);
361 if (unlikely(dma_mapping_error(sc->dev,
362 bf->bf_buf_addr))) {
363 dev_kfree_skb_any(skb);
364 bf->bf_mpdu = NULL;
6cf9e995 365 bf->bf_buf_addr = 0;
3800276a
JP
366 ath_err(common,
367 "dma_mapping_error() on RX init\n");
b5c80475
FF
368 error = -ENOMEM;
369 goto err;
370 }
b5c80475
FF
371 }
372 sc->rx.rxlink = NULL;
797fe5cb 373 }
f078f209 374
797fe5cb 375err:
f078f209
LR
376 if (error)
377 ath_rx_cleanup(sc);
378
379 return error;
380}
381
f078f209
LR
382void ath_rx_cleanup(struct ath_softc *sc)
383{
cc861f74
LR
384 struct ath_hw *ah = sc->sc_ah;
385 struct ath_common *common = ath9k_hw_common(ah);
f078f209
LR
386 struct sk_buff *skb;
387 struct ath_buf *bf;
388
b5c80475
FF
389 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
390 ath_rx_edma_cleanup(sc);
391 return;
392 } else {
393 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
394 skb = bf->bf_mpdu;
395 if (skb) {
396 dma_unmap_single(sc->dev, bf->bf_buf_addr,
397 common->rx_bufsize,
398 DMA_FROM_DEVICE);
399 dev_kfree_skb(skb);
6cf9e995
BG
400 bf->bf_buf_addr = 0;
401 bf->bf_mpdu = NULL;
b5c80475 402 }
051b9191 403 }
f078f209 404
b5c80475
FF
405 if (sc->rx.rxdma.dd_desc_len != 0)
406 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
407 }
f078f209
LR
408}
409
410/*
411 * Calculate the receive filter according to the
412 * operating mode and state:
413 *
414 * o always accept unicast, broadcast, and multicast traffic
415 * o maintain current state of phy error reception (the hal
416 * may enable phy error frames for noise immunity work)
417 * o probe request frames are accepted only when operating in
418 * hostap, adhoc, or monitor modes
419 * o enable promiscuous mode according to the interface state
420 * o accept beacons:
421 * - when operating in adhoc mode so the 802.11 layer creates
422 * node table entries for peers,
423 * - when operating in station mode for collecting rssi data when
424 * the station is otherwise quiet, or
425 * - when operating as a repeater so we see repeater-sta beacons
426 * - when scanning
427 */
428
429u32 ath_calcrxfilter(struct ath_softc *sc)
430{
f078f209
LR
431 u32 rfilt;
432
ac06697c 433 rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
f078f209
LR
434 | ATH9K_RX_FILTER_MCAST;
435
9c1d8e4a 436 if (sc->rx.rxfilter & FIF_PROBE_REQ)
f078f209
LR
437 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
438
217ba9da
JM
439 /*
440 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
441 * mode interface or when in monitor mode. AP mode does not need this
442 * since it receives all in-BSS frames anyway.
443 */
2e286947 444 if (sc->sc_ah->is_monitoring)
f078f209 445 rfilt |= ATH9K_RX_FILTER_PROM;
f078f209 446
d42c6b71
S
447 if (sc->rx.rxfilter & FIF_CONTROL)
448 rfilt |= ATH9K_RX_FILTER_CONTROL;
449
dbaaa147 450 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
cfda6695 451 (sc->nvifs <= 1) &&
dbaaa147
VT
452 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
453 rfilt |= ATH9K_RX_FILTER_MYBEACON;
454 else
f078f209
LR
455 rfilt |= ATH9K_RX_FILTER_BEACON;
456
264bbec8 457 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
66afad01 458 (sc->rx.rxfilter & FIF_PSPOLL))
dbaaa147 459 rfilt |= ATH9K_RX_FILTER_PSPOLL;
be0418ad 460
7ea310be
S
461 if (conf_is_ht(&sc->hw->conf))
462 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
463
7545daf4 464 if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
5eb6ba83
JC
465 /* The following may also be needed for other older chips */
466 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
467 rfilt |= ATH9K_RX_FILTER_PROM;
b93bce2a
JM
468 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
469 }
470
f078f209 471 return rfilt;
7dcfdcd9 472
f078f209
LR
473}
474
f078f209
LR
475int ath_startrecv(struct ath_softc *sc)
476{
cbe61d8a 477 struct ath_hw *ah = sc->sc_ah;
f078f209
LR
478 struct ath_buf *bf, *tbf;
479
b5c80475
FF
480 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
481 ath_edma_start_recv(sc);
482 return 0;
483 }
484
b77f483f
S
485 spin_lock_bh(&sc->rx.rxbuflock);
486 if (list_empty(&sc->rx.rxbuf))
f078f209
LR
487 goto start_recv;
488
b77f483f
S
489 sc->rx.rxlink = NULL;
490 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
f078f209
LR
491 ath_rx_buf_link(sc, bf);
492 }
493
494 /* We could have deleted elements so the list may be empty now */
b77f483f 495 if (list_empty(&sc->rx.rxbuf))
f078f209
LR
496 goto start_recv;
497
b77f483f 498 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
f078f209 499 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
be0418ad 500 ath9k_hw_rxena(ah);
f078f209
LR
501
502start_recv:
be0418ad 503 ath_opmode_init(sc);
48a6a468 504 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
be0418ad 505
7583c550
LR
506 spin_unlock_bh(&sc->rx.rxbuflock);
507
f078f209
LR
508 return 0;
509}
510
f078f209
LR
511bool ath_stoprecv(struct ath_softc *sc)
512{
cbe61d8a 513 struct ath_hw *ah = sc->sc_ah;
5882da02 514 bool stopped, reset = false;
f078f209 515
1e450285 516 spin_lock_bh(&sc->rx.rxbuflock);
d47844a0 517 ath9k_hw_abortpcurecv(ah);
be0418ad 518 ath9k_hw_setrxfilter(ah, 0);
5882da02 519 stopped = ath9k_hw_stopdmarecv(ah, &reset);
b5c80475
FF
520
521 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
522 ath_edma_stop_recv(sc);
523 else
524 sc->rx.rxlink = NULL;
1e450285 525 spin_unlock_bh(&sc->rx.rxbuflock);
be0418ad 526
d584747b
RM
527 if (!(ah->ah_flags & AH_UNPLUGGED) &&
528 unlikely(!stopped)) {
d7fd1b50
BG
529 ath_err(ath9k_hw_common(sc->sc_ah),
530 "Could not stop RX, we could be "
531 "confusing the DMA engine when we start RX up\n");
532 ATH_DBG_WARN_ON_ONCE(!stopped);
533 }
2232d31b 534 return stopped && !reset;
f078f209
LR
535}
536
f078f209
LR
537void ath_flushrecv(struct ath_softc *sc)
538{
98deeea0 539 sc->sc_flags |= SC_OP_RXFLUSH;
b5c80475
FF
540 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
541 ath_rx_tasklet(sc, 1, true);
542 ath_rx_tasklet(sc, 1, false);
98deeea0 543 sc->sc_flags &= ~SC_OP_RXFLUSH;
f078f209
LR
544}
545
cc65965c
JM
546static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
547{
548 /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
549 struct ieee80211_mgmt *mgmt;
550 u8 *pos, *end, id, elen;
551 struct ieee80211_tim_ie *tim;
552
553 mgmt = (struct ieee80211_mgmt *)skb->data;
554 pos = mgmt->u.beacon.variable;
555 end = skb->data + skb->len;
556
557 while (pos + 2 < end) {
558 id = *pos++;
559 elen = *pos++;
560 if (pos + elen > end)
561 break;
562
563 if (id == WLAN_EID_TIM) {
564 if (elen < sizeof(*tim))
565 break;
566 tim = (struct ieee80211_tim_ie *) pos;
567 if (tim->dtim_count != 0)
568 break;
569 return tim->bitmap_ctrl & 0x01;
570 }
571
572 pos += elen;
573 }
574
575 return false;
576}
577
cc65965c
JM
578static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
579{
1510718d 580 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
cc65965c
JM
581
582 if (skb->len < 24 + 8 + 2 + 2)
583 return;
584
1b04b930 585 sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
293dc5df 586
1b04b930
S
587 if (sc->ps_flags & PS_BEACON_SYNC) {
588 sc->ps_flags &= ~PS_BEACON_SYNC;
d2182b69 589 ath_dbg(common, PS,
226afe68 590 "Reconfigure Beacon timers based on timestamp from the AP\n");
99e4d43a 591 ath_set_beacon(sc);
ccdfeab6
JM
592 }
593
cc65965c
JM
594 if (ath_beacon_dtim_pending_cab(skb)) {
595 /*
596 * Remain awake waiting for buffered broadcast/multicast
58f5fffd
GJ
597 * frames. If the last broadcast/multicast frame is not
598 * received properly, the next beacon frame will work as
599 * a backup trigger for returning into NETWORK SLEEP state,
600 * so we are waiting for it as well.
cc65965c 601 */
d2182b69 602 ath_dbg(common, PS,
226afe68 603 "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
1b04b930 604 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
cc65965c
JM
605 return;
606 }
607
1b04b930 608 if (sc->ps_flags & PS_WAIT_FOR_CAB) {
cc65965c
JM
609 /*
610 * This can happen if a broadcast frame is dropped or the AP
611 * fails to send a frame indicating that all CAB frames have
612 * been delivered.
613 */
1b04b930 614 sc->ps_flags &= ~PS_WAIT_FOR_CAB;
d2182b69 615 ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
cc65965c 616 }
cc65965c
JM
617}
618
f73c604c 619static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
cc65965c
JM
620{
621 struct ieee80211_hdr *hdr;
c46917bb 622 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
cc65965c
JM
623
624 hdr = (struct ieee80211_hdr *)skb->data;
625
626 /* Process Beacon and CAB receive in PS state */
ededf1f8 627 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
f73c604c 628 && mybeacon)
cc65965c 629 ath_rx_ps_beacon(sc, skb);
1b04b930 630 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
cc65965c
JM
631 (ieee80211_is_data(hdr->frame_control) ||
632 ieee80211_is_action(hdr->frame_control)) &&
633 is_multicast_ether_addr(hdr->addr1) &&
634 !ieee80211_has_moredata(hdr->frame_control)) {
cc65965c
JM
635 /*
636 * No more broadcast/multicast frames to be received at this
637 * point.
638 */
3fac6dfd 639 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
d2182b69 640 ath_dbg(common, PS,
226afe68 641 "All PS CAB frames received, back to sleep\n");
1b04b930 642 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
9a23f9ca
JM
643 !is_multicast_ether_addr(hdr->addr1) &&
644 !ieee80211_has_morefrags(hdr->frame_control)) {
1b04b930 645 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
d2182b69 646 ath_dbg(common, PS,
226afe68 647 "Going back to sleep after having received PS-Poll data (0x%lx)\n",
1b04b930
S
648 sc->ps_flags & (PS_WAIT_FOR_BEACON |
649 PS_WAIT_FOR_CAB |
650 PS_WAIT_FOR_PSPOLL_DATA |
651 PS_WAIT_FOR_TX_ACK));
cc65965c
JM
652 }
653}
654
b5c80475
FF
655static bool ath_edma_get_buffers(struct ath_softc *sc,
656 enum ath9k_rx_qtype qtype)
f078f209 657{
b5c80475
FF
658 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
659 struct ath_hw *ah = sc->sc_ah;
660 struct ath_common *common = ath9k_hw_common(ah);
661 struct sk_buff *skb;
662 struct ath_buf *bf;
663 int ret;
664
665 skb = skb_peek(&rx_edma->rx_fifo);
666 if (!skb)
667 return false;
668
669 bf = SKB_CB_ATHBUF(skb);
670 BUG_ON(!bf);
671
ce9426d1 672 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
b5c80475
FF
673 common->rx_bufsize, DMA_FROM_DEVICE);
674
675 ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
ce9426d1
ML
676 if (ret == -EINPROGRESS) {
677 /*let device gain the buffer again*/
678 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
679 common->rx_bufsize, DMA_FROM_DEVICE);
b5c80475 680 return false;
ce9426d1 681 }
b5c80475
FF
682
683 __skb_unlink(skb, &rx_edma->rx_fifo);
684 if (ret == -EINVAL) {
685 /* corrupt descriptor, skip this one and the following one */
686 list_add_tail(&bf->list, &sc->rx.rxbuf);
687 ath_rx_edma_buf_link(sc, qtype);
688 skb = skb_peek(&rx_edma->rx_fifo);
689 if (!skb)
690 return true;
691
692 bf = SKB_CB_ATHBUF(skb);
693 BUG_ON(!bf);
694
695 __skb_unlink(skb, &rx_edma->rx_fifo);
696 list_add_tail(&bf->list, &sc->rx.rxbuf);
697 ath_rx_edma_buf_link(sc, qtype);
083e3e8d 698 return true;
b5c80475
FF
699 }
700 skb_queue_tail(&rx_edma->rx_buffers, skb);
701
702 return true;
703}
f078f209 704
b5c80475
FF
705static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
706 struct ath_rx_status *rs,
707 enum ath9k_rx_qtype qtype)
708{
709 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
710 struct sk_buff *skb;
be0418ad 711 struct ath_buf *bf;
b5c80475
FF
712
713 while (ath_edma_get_buffers(sc, qtype));
714 skb = __skb_dequeue(&rx_edma->rx_buffers);
715 if (!skb)
716 return NULL;
717
718 bf = SKB_CB_ATHBUF(skb);
719 ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
720 return bf;
721}
722
723static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
724 struct ath_rx_status *rs)
725{
726 struct ath_hw *ah = sc->sc_ah;
727 struct ath_common *common = ath9k_hw_common(ah);
f078f209 728 struct ath_desc *ds;
b5c80475
FF
729 struct ath_buf *bf;
730 int ret;
731
732 if (list_empty(&sc->rx.rxbuf)) {
733 sc->rx.rxlink = NULL;
734 return NULL;
735 }
736
737 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
738 ds = bf->bf_desc;
739
740 /*
741 * Must provide the virtual address of the current
742 * descriptor, the physical address, and the virtual
743 * address of the next descriptor in the h/w chain.
744 * This allows the HAL to look ahead to see if the
745 * hardware is done with a descriptor by checking the
746 * done bit in the following descriptor and the address
747 * of the current descriptor the DMA engine is working
748 * on. All this is necessary because of our use of
749 * a self-linked list to avoid rx overruns.
750 */
3de21116 751 ret = ath9k_hw_rxprocdesc(ah, ds, rs);
b5c80475
FF
752 if (ret == -EINPROGRESS) {
753 struct ath_rx_status trs;
754 struct ath_buf *tbf;
755 struct ath_desc *tds;
756
757 memset(&trs, 0, sizeof(trs));
758 if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
759 sc->rx.rxlink = NULL;
760 return NULL;
761 }
762
763 tbf = list_entry(bf->list.next, struct ath_buf, list);
764
765 /*
766 * On some hardware the descriptor status words could
767 * get corrupted, including the done bit. Because of
768 * this, check if the next descriptor's done bit is
769 * set or not.
770 *
771 * If the next descriptor's done bit is set, the current
772 * descriptor has been corrupted. Force s/w to discard
773 * this descriptor and continue...
774 */
775
776 tds = tbf->bf_desc;
3de21116 777 ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
b5c80475
FF
778 if (ret == -EINPROGRESS)
779 return NULL;
780 }
781
782 if (!bf->bf_mpdu)
783 return bf;
784
785 /*
786 * Synchronize the DMA transfer with CPU before
787 * 1. accessing the frame
788 * 2. requeueing the same buffer to h/w
789 */
ce9426d1 790 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
b5c80475
FF
791 common->rx_bufsize,
792 DMA_FROM_DEVICE);
793
794 return bf;
795}
796
d435700f
S
797/* Assumes you've already done the endian to CPU conversion */
798static bool ath9k_rx_accept(struct ath_common *common,
9f167f64 799 struct ieee80211_hdr *hdr,
d435700f
S
800 struct ieee80211_rx_status *rxs,
801 struct ath_rx_status *rx_stats,
802 bool *decrypt_error)
803{
ec205999 804 struct ath_softc *sc = (struct ath_softc *) common->priv;
66760eac 805 bool is_mc, is_valid_tkip, strip_mic, mic_error;
d435700f 806 struct ath_hw *ah = common->ah;
d435700f 807 __le16 fc;
b7b1b512 808 u8 rx_status_len = ah->caps.rx_status_len;
d435700f 809
d435700f
S
810 fc = hdr->frame_control;
811
66760eac
FF
812 is_mc = !!is_multicast_ether_addr(hdr->addr1);
813 is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
814 test_bit(rx_stats->rs_keyix, common->tkip_keymap);
152e585d
BJ
815 strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
816 !(rx_stats->rs_status &
846d9363
FF
817 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
818 ATH9K_RXERR_KEYMISS));
66760eac 819
f88373fa
FF
820 /*
821 * Key miss events are only relevant for pairwise keys where the
822 * descriptor does contain a valid key index. This has been observed
823 * mostly with CCMP encryption.
824 */
825 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
826 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
827
d435700f
S
828 if (!rx_stats->rs_datalen)
829 return false;
830 /*
831 * rs_status follows rs_datalen so if rs_datalen is too large
832 * we can take a hint that hardware corrupted it, so ignore
833 * those frames.
834 */
b7b1b512 835 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
d435700f
S
836 return false;
837
0d95521e 838 /* Only use error bits from the last fragment */
d435700f 839 if (rx_stats->rs_more)
0d95521e 840 return true;
d435700f 841
66760eac
FF
842 mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
843 !ieee80211_has_morefrags(fc) &&
844 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
845 (rx_stats->rs_status & ATH9K_RXERR_MIC);
846
d435700f
S
847 /*
848 * The rx_stats->rs_status will not be set until the end of the
849 * chained descriptors so it can be ignored if rs_more is set. The
850 * rs_more will be false at the last element of the chained
851 * descriptors.
852 */
853 if (rx_stats->rs_status != 0) {
846d9363
FF
854 u8 status_mask;
855
66760eac 856 if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
d435700f 857 rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
66760eac
FF
858 mic_error = false;
859 }
d435700f
S
860 if (rx_stats->rs_status & ATH9K_RXERR_PHY)
861 return false;
862
846d9363
FF
863 if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
864 (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
d435700f 865 *decrypt_error = true;
66760eac 866 mic_error = false;
d435700f 867 }
66760eac 868
d435700f
S
869 /*
870 * Reject error frames with the exception of
871 * decryption and MIC failures. For monitor mode,
872 * we also ignore the CRC error.
873 */
846d9363
FF
874 status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
875 ATH9K_RXERR_KEYMISS;
876
ec205999 877 if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
846d9363
FF
878 status_mask |= ATH9K_RXERR_CRC;
879
880 if (rx_stats->rs_status & ~status_mask)
881 return false;
d435700f 882 }
66760eac
FF
883
884 /*
885 * For unicast frames the MIC error bit can have false positives,
886 * so all MIC error reports need to be validated in software.
887 * False negatives are not common, so skip software verification
888 * if the hardware considers the MIC valid.
889 */
890 if (strip_mic)
891 rxs->flag |= RX_FLAG_MMIC_STRIPPED;
892 else if (is_mc && mic_error)
893 rxs->flag |= RX_FLAG_MMIC_ERROR;
894
d435700f
S
895 return true;
896}
897
898static int ath9k_process_rate(struct ath_common *common,
899 struct ieee80211_hw *hw,
900 struct ath_rx_status *rx_stats,
9f167f64 901 struct ieee80211_rx_status *rxs)
d435700f
S
902{
903 struct ieee80211_supported_band *sband;
904 enum ieee80211_band band;
905 unsigned int i = 0;
906
907 band = hw->conf.channel->band;
908 sband = hw->wiphy->bands[band];
909
910 if (rx_stats->rs_rate & 0x80) {
911 /* HT rate */
912 rxs->flag |= RX_FLAG_HT;
913 if (rx_stats->rs_flags & ATH9K_RX_2040)
914 rxs->flag |= RX_FLAG_40MHZ;
915 if (rx_stats->rs_flags & ATH9K_RX_GI)
916 rxs->flag |= RX_FLAG_SHORT_GI;
917 rxs->rate_idx = rx_stats->rs_rate & 0x7f;
918 return 0;
919 }
920
921 for (i = 0; i < sband->n_bitrates; i++) {
922 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
923 rxs->rate_idx = i;
924 return 0;
925 }
926 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
927 rxs->flag |= RX_FLAG_SHORTPRE;
928 rxs->rate_idx = i;
929 return 0;
930 }
931 }
932
933 /*
934 * No valid hardware bitrate found -- we should not get here
935 * because hardware has already validated this frame as OK.
936 */
d2182b69 937 ath_dbg(common, ANY,
226afe68
JP
938 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
939 rx_stats->rs_rate);
d435700f
S
940
941 return -EINVAL;
942}
943
944static void ath9k_process_rssi(struct ath_common *common,
945 struct ieee80211_hw *hw,
9f167f64 946 struct ieee80211_hdr *hdr,
d435700f
S
947 struct ath_rx_status *rx_stats)
948{
9ac58615 949 struct ath_softc *sc = hw->priv;
d435700f 950 struct ath_hw *ah = common->ah;
9fa23e17 951 int last_rssi;
d435700f 952
cf3af748
RM
953 if (!rx_stats->is_mybeacon ||
954 ((ah->opmode != NL80211_IFTYPE_STATION) &&
955 (ah->opmode != NL80211_IFTYPE_ADHOC)))
9fa23e17
FF
956 return;
957
9fa23e17 958 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
9ac58615 959 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
d435700f 960
9ac58615 961 last_rssi = sc->last_rssi;
d435700f
S
962 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
963 rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
964 ATH_RSSI_EP_MULTIPLIER);
965 if (rx_stats->rs_rssi < 0)
966 rx_stats->rs_rssi = 0;
967
968 /* Update Beacon RSSI, this is used by ANI. */
9fa23e17 969 ah->stats.avgbrssi = rx_stats->rs_rssi;
d435700f
S
970}
971
/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 *
 * Returns 0 on success (rx_status filled in), -EINVAL if the frame must
 * be dropped.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	/* Fill in the mac80211 RX status from the processed HW status. */
	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;

	return 0;
}
1012
1013static void ath9k_rx_skb_postprocess(struct ath_common *common,
1014 struct sk_buff *skb,
1015 struct ath_rx_status *rx_stats,
1016 struct ieee80211_rx_status *rxs,
1017 bool decrypt_error)
1018{
1019 struct ath_hw *ah = common->ah;
1020 struct ieee80211_hdr *hdr;
1021 int hdrlen, padpos, padsize;
1022 u8 keyix;
1023 __le16 fc;
1024
1025 /* see if any padding is done by the hw and remove it */
1026 hdr = (struct ieee80211_hdr *) skb->data;
1027 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1028 fc = hdr->frame_control;
1029 padpos = ath9k_cmn_padpos(hdr->frame_control);
1030
1031 /* The MAC header is padded to have 32-bit boundary if the
1032 * packet payload is non-zero. The general calculation for
1033 * padsize would take into account odd header lengths:
1034 * padsize = (4 - padpos % 4) % 4; However, since only
1035 * even-length headers are used, padding can only be 0 or 2
1036 * bytes and we can optimize this a bit. In addition, we must
1037 * not try to remove padding from short control frames that do
1038 * not have payload. */
1039 padsize = padpos & 3;
1040 if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
1041 memmove(skb->data + padsize, skb->data, padpos);
1042 skb_pull(skb, padsize);
1043 }
1044
1045 keyix = rx_stats->rs_keyix;
1046
1047 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
1048 ieee80211_has_protected(fc)) {
1049 rxs->flag |= RX_FLAG_DECRYPTED;
1050 } else if (ieee80211_has_protected(fc)
1051 && !decrypt_error && skb->len >= hdrlen + 4) {
1052 keyix = skb->data[hdrlen + 3] >> 6;
1053
1054 if (test_bit(keyix, common->keymap))
1055 rxs->flag |= RX_FLAG_DECRYPTED;
1056 }
1057 if (ah->sw_mgmt_crypto &&
1058 (rxs->flag & RX_FLAG_DECRYPTED) &&
1059 ieee80211_is_mgmt(fc))
1060 /* Use software decrypt for management frames. */
1061 rxs->flag &= ~RX_FLAG_DECRYPTED;
1062}
b5c80475 1063
102885a5
VT
/*
 * Start a quick antenna-diversity scan from a known-good alternate
 * configuration: record the current main RSSI for the main LNA and pick
 * the two alternate configurations to probe next, based on the packed
 * (main << 4 | alt) LNA configuration.  Unknown combinations leave the
 * scan configuration untouched.
 */
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	/* Remember the averaged RSSI seen on the current main LNA. */
	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case 0x10: /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x20: /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case 0x21: /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x12: /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x13: /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x23: /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}
1118
/*
 * Advance the antenna-diversity quick scan.  Step 0 probes the first
 * candidate alternate configuration, step 1 probes the second while
 * rating the first (first_ratio), and step 2 rates the second
 * (second_ratio) and then commits the best main/alt LNA combination to
 * *div_ant_conf.  The ratio tests require > 50 packets in the sampling
 * window before trusting the averages.
 */
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				       struct ath_hw_antcomb_conf *div_ant_conf,
				       int main_rssi_avg, int alt_rssi_avg,
				       int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to second conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		/* Decide whether the first probed alternate beat main;
		 * thresholds depend on which LNA is currently main. */
		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		/* Final step: stop scanning, rate the second alternate and
		 * commit the winning configuration. */
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		/* Record per-LNA RSSI from whichever config was probed. */
		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		/* Pick the main LNA: LNA2 only if clearly stronger. */
		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt*/
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2*/
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}
1317
3e9a212a
MSS
/*
 * Program the fast-diversity bias (and, for newer chips, the gain
 * tables) for the chosen main/alt LNA combination.  The switch key is
 * the packed (main_lna_conf << 4 | alt_lna_conf) value; the bias
 * constants are chip-specific and differ per div_group.  Unknown
 * combinations deliberately leave the current values untouched.
 */
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
					  struct ath_ant_comb *antcomb,
					  int alt_ratio)
{
	if (ant_conf->div_group == 0) {
		/* Adjust the fast_div_bias based on main and alt lna conf */
		switch ((ant_conf->main_lna_conf << 4) |
				ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x10: /* LNA2 A-B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x2;
			break;
		case 0x13: /* LNA2 A+B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x20: /* LNA1 A-B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x0;
			break;
		case 0x23: /* LNA1 A+B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 1) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf.
		 * While not scanning and the alternate antenna is clearly
		 * winning, a stronger bias (0x3f) is used for the
		 * LNA-main configurations. */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 2) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf.
		 * Same structure as div_group 1, but with bias values
		 * 0x1/0x2 for the scan-dependent cases. */
		switch ((ant_conf->main_lna_conf << 4) |
				ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
				(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
				(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
				(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
				(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	}
}
1532
/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	/* Which antenna config actually received this frame, and which
	 * is currently configured as main (packed in rs_rssi_ctl2). */
	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record packet only when both main_rssi and alt_rssi is positive */
	if (main_rssi > 0 && alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi  += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check: abort an alt-good scan early when it has
	 * been running too long or the alt antenna is clearly losing. */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					    antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	/* Keep sampling until the window is full (or a short scan ends
	 * it); never decide in the middle of an A-MPDU. */
	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	    rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				 antcomb->total_pkt_count);
	}


	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;

	antcomb->count++;

	/* Periodically kick off a full scan of the LNA combinations. */
	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		/* Not scanning: swap main/alt if the alternate antenna is
		 * performing better, otherwise only continue below when
		 * the alternate shows enough promise. */
		if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
					alt_ratio, curr_main_set, curr_alt_set,
					alt_rssi_avg, main_rssi_avg)) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
				     div_ant_conf.lna1_lna2_delta)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		/* Scan in progress: record the probed config's RSSI and
		 * step to the next configuration. */
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf  =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf  =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			/* All four configs probed: choose main LNA and the
			 * best alternate from the recorded averages. */
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	/* Reset the sampling window for the next decision period. */
	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}
1766
b5c80475
FF
/*
 * Main RX processing loop: drain completed buffers from the RX queue
 * (legacy DMA or EDMA high/low priority), validate and post-process
 * each frame, hand it to mac80211, and re-link a freshly allocated
 * buffer to the hardware.  Runs under sc->rx.rxbuflock.
 *
 * @flush: non-zero when called from the flush path; frames are then
 *         requeued without processing and RX is not re-enabled.
 * @hp:    EDMA only - selects the high-priority RX queue.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	/* Snapshot the 64-bit TSF once; used below to extend the 32-bit
	 * per-frame timestamp to a full mactime. */
	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		if (ieee80211_is_beacon(hdr->frame_control) &&
		    !is_zero_ether_addr(common->curbssid) &&
		    !compare_ether_addr(hdr->addr3, common->curbssid))
			rs.is_mybeacon = true;
		else
			rs.is_mybeacon = false;

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (sc->sc_flags & SC_OP_RXFLUSH)
			goto requeue_drop_frag;

		/* Extend the 32-bit HW timestamp to 64 bits, correcting
		 * for wrap-around relative to the TSF snapshot. */
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue_drop_frag;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
			  bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			/* Final fragment: append this skb's payload to the
			 * accumulated header skb, growing it if needed. */
			int space = skb->len - skb_tailroom(hdr_skb);

			sc->rx.frag = NULL;

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				goto requeue_drop_frag;
			}

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}


		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {

			/*
			 * change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}

		}

		/* Strip the 8-byte Michael MIC the hardware left in place. */
		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
			if (!flush)
				ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	/* Re-enable RXEOL/RXORN interrupts that were masked while the
	 * RX queue was being drained. */
	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}
This page took 1.303585 seconds and 5 git commands to generate.