/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/dma-mapping.h>

#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_rxbuf **)__skb->cb))
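/*
 * Reader's note (illustrative, not part of the original source): while an
 * RX buffer sits in the EDMA FIFO the driver stashes its own ath_rxbuf
 * pointer in the skb's control buffer (skb->cb, 48 bytes in mainline),
 * e.g.
 *
 *	SKB_CB_ATHBUF(skb) = bf;	// remember the owning buffer on link
 *	...
 *	bf = SKB_CB_ATHBUF(skb);	// recover it when the frame completes
 *
 * This works because mac80211 does not touch skb->cb while the skb is
 * still owned by the driver's RX path.
 */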
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process it.
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}
static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
{
	if (sc->rx.buf_hold)
		ath_rx_buf_link(sc, sc->rx.buf_hold);

	sc->rx.buf_hold = bf;
}
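/*
 * Sketch of the relink scheme (reader's interpretation, not text from the
 * original file): instead of self-linking the last descriptor (see the
 * comment above ath_rx_buf_link), the buffer that was just completed is
 * parked in sc->rx.buf_hold and only appended back to the hardware chain
 * once the *next* buffer has been processed. The DMA engine therefore
 * always stops on a null link instead of re-reading a link pointer that
 * software is about to rewrite, which is the race the self-linked tail
 * descriptor used to cause on 11n hardware.
 */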
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	__skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
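/*
 * Reader's note (assumption drawn from the code above, not original text):
 * on EDMA-capable chips there is no descriptor chain. The host instead
 * pushes empty buffers into a high- and a low-priority hardware FIFO, and
 * the chip writes an rx_status_len-byte status block at the head of each
 * buffer when a frame completes. Clearing that status area and syncing it
 * to the device before queueing ensures a stale "done" word left over from
 * a previous reception can never be mistaken for a freshly completed one.
 */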
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_rxbuf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rxbuf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_rxbuf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	__skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_rxbuf) * nbufs;
	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
static void ath_edma_start_recv(struct ath_softc *sc)
{
	ath9k_hw_rxena(sc->sc_ah);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
}
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		return ath_rx_edma_init(sc, nbufs);

	ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
		common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_err(common,
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
	}
	sc->rx.rxlink = NULL;

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */
u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	if (config_enabled(CONFIG_ATH9K_TX99))
		return 0;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* if operating on a DFS channel, enable radar pulse detection */
	if (sc->hw->conf.radar_enabled)
		rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	return rfilt;
}
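/*
 * Worked example (illustrative only, not from the original source): for a
 * single associated station interface on a non-DFS channel, with no
 * monitor or promiscuous filter flags set, the checks above reduce to
 * roughly
 *
 *	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST |
 *		ATH9K_RX_FILTER_MCAST | ATH9K_RX_FILTER_MYBEACON;
 *
 * plus ATH9K_RX_FILTER_COMP_BAR on an HT channel, i.e. the chip is told to
 * deliver only the traffic the stack actually needs.
 */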
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rxbuf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.buf_hold = NULL;
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	return 0;
}
static void ath_flushrecv(struct ath_softc *sc)
{
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
}
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	ath_flushrecv(sc);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
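/*
 * Example TIM element (illustrative bytes, not taken from the original
 * source): 05 04 00 01 01 00 parses as element ID 5 (WLAN_EID_TIM),
 * length 4, dtim_count 0 (this beacon is a DTIM), dtim_period 1,
 * bitmap_ctrl 0x01 (bit 0 set, i.e. group-addressed traffic is buffered)
 * and a one-byte partial virtual bitmap. For such a beacon the helper
 * above returns true and the station stays awake for the buffered CAB
 * frames.
 */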
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure beacon timers based on synchronized timestamp\n");
		ath9k_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_rxbuf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}
static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						  struct ath_rx_status *rs,
						  enum ath9k_rx_qtype qtype)
{
	struct ath_rxbuf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}
static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
					     struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_rxbuf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	if (bf == sc->rx.buf_hold)
		return NULL;

	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_rxbuf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_rxbuf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */
		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;

		/*
		 * Re-check previous descriptor, in case it has been filled
		 * in the meantime.
		 */
		ret = ath9k_hw_rxprocdesc(ah, ds, rs);
		if (ret == -EINPROGRESS) {
			/*
			 * mark descriptor as zero-length and set the 'more'
			 * flag to ensure that both buffers get discarded
			 */
			rs->rs_datalen = 0;
			rs->rs_more = true;
		}
	}

	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
static void ath9k_process_tsf(struct ath_rx_status *rs,
			      struct ieee80211_rx_status *rxs,
			      u64 tsf)
{
	u32 tsf_lower = tsf & 0xffffffff;

	rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
	if (rs->rs_tstamp > tsf_lower &&
	    unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
		rxs->mactime -= 0x100000000ULL;

	if (rs->rs_tstamp < tsf_lower &&
	    unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
		rxs->mactime += 0x100000000ULL;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct sk_buff *skb,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error, u64 tsf)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr;
	bool discard_current = sc->rx.discard_next;

	/*
	 * Discard corrupt descriptors which are marked in
	 * ath_get_next_rx_buf().
	 */
	if (discard_current)
		goto corrupt;

	sc->rx.discard_next = false;

	/*
	 * Discard zero-length packets.
	 */
	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	/*
	 * Return immediately if the RX descriptor has been marked
	 * as corrupt based on the various error bits.
	 *
	 * This is different from the other corrupt descriptor
	 * condition handled above.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
		goto corrupt;

	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

	ath9k_process_tsf(rx_stats, rx_status, tsf);
	ath_debug_stat_rx(sc, rx_stats);

	/*
	 * Process PHY errors and return so that the packet
	 * can be dropped.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
		ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
		if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
			RX_STAT_INC(rx_spectral);

		return -EINVAL;
	}

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->rx.rxfilter))
		return -EINVAL;

	if (ath_is_mybeacon(common, hdr)) {
		RX_STAT_INC(rx_beacons);
		rx_stats->is_mybeacon = true;
	}

	/*
	 * This shouldn't happen, but have a safety check anyway.
	 */
	if (WARN_ON(!ah->curchan))
		return -EINVAL;

	if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
		/*
		 * No valid hardware bitrate found -- we should not get here
		 * because hardware has already validated this frame as OK.
		 */
		ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
			rx_stats->rs_rate);
		RX_STAT_INC(rx_rate_err);
		return -EINVAL;
	}

	ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);

	rx_status->band = ah->curchan->chan->band;
	rx_status->freq = ah->curchan->chan->center_freq;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	if (ieee80211_is_data_present(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control))
		sc->sc_ah->btcoex_hw.rssi = rx_stats->rs_rssi;
#endif

	sc->rx.discard_next = rx_stats->rs_more;

	return 0;

corrupt:
	sc->rx.discard_next = rx_stats->rs_more;
	return -EINVAL;
}
/*
 * Run the LNA combining algorithm only in these cases:
 *
 * Standalone WLAN cards with both LNA/Antenna diversity
 * enabled in the EEPROM.
 *
 * WLAN+BT cards which are in the supported card list
 * in ath_pci_id_table and the user has loaded the
 * driver with "bt_ant_diversity" set to true.
 */
static void ath9k_antenna_check(struct ath_softc *sc,
				struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
		return;

	/*
	 * Change the default rx antenna if rx diversity
	 * chooses the other antenna 3 times in a row.
	 */
	if (sc->rx.defant != rs->rs_antenna) {
		if (++sc->rx.rxotherant >= 3)
			ath_setdefantenna(sc, rs->rs_antenna);
	} else {
		sc->rx.rxotherant = 0;
	}

	if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
		if (common->bt_ant_diversity)
			ath_ant_comb_scan(sc, rs);
	} else {
		ath_ant_comb_scan(sc, rs);
	}
}
static void ath9k_apply_ampdu_details(struct ath_softc *sc,
	struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
{
	if (rs->rs_isaggr) {
		rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;

		rxs->ampdu_reference = sc->rx.ampdu_ref;

		if (!rs->rs_moreaggr) {
			rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
			sc->rx.ampdu_ref++;
		}

		if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
			rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
	}
}
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_rxbuf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u64 tsf = 0;
	unsigned long flags;
	dma_addr_t new_buf_addr;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	tsf = ath9k_hw_gettsf64(ah);

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
						 &decrypt_error, tsf);
		if (retval)
			goto requeue_drop_frag;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* We will now give hardware our shiny new allocated skb */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
						     rxs, decrypt_error);

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		ath9k_antenna_check(sc, &rs);
		ath9k_apply_ampdu_details(sc, &rs, rxs);
		ath_debug_rate_stats(sc, &rs, skb);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		if (flush)
			continue;

		if (edma) {
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			ath_rx_buf_relink(sc, bf);
			ath9k_hw_rxena(ah);
		}
	} while (1);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);