/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
					    struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		/* Match the frame's destination address against each virtual
		 * wiphy's permanent address; fall back to the primary wiphy. */
		if (compare_ether_addr(hdr->addr1,
				       aphy->hw->wiphy->perm_addr) == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);

	return hw;
}
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ds->ds_vdata = skb->data;

	/* setup rx descriptors. The rx.bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process */
	ath9k_hw_setuprxdesc(ah, ds,
			     sc->rx.bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}
/*
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}
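/*
 * Example of the wrap-around handling above: if the descriptor reports a
 * time stamp of 0x7ff0 but the low 15 bits of the current TSF read back as
 * 0x0010, the low bits have wrapped since the frame was received, so 0x8000
 * is subtracted before the upper TSF bits are combined with rstamp.
 */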
static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align.  This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */

	/* Note: the kernel can allocate a value greater than
	 * what we ask it to give us. We really only need 4 KB as that
	 * is what this hardware supports and in fact we need at least 3849
	 * as that is the MAX AMSDU size this hardware supports.
	 * Unfortunately this means we may get 8 KB here from the
	 * kernel... and that is actually what is observed on some
	 * systems :( */
	skb = dev_alloc_skb(len + sc->cachelsz - 1);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % sc->cachelsz;
		if (off != 0)
			skb_reserve(skb, sc->cachelsz - off);
	} else {
		DPRINTF(sc, ATH_DBG_FATAL,
			"skbuff alloc of size %u failed\n", len);
		return NULL;
	}

	return skb;
}
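/*
 * Alignment example for the allocation above: with a 32-byte cache line, an
 * skb whose data pointer lands 5 bytes past a cache-line boundary gets
 * skb_reserve(skb, 27), which is why the allocation over-asks by
 * cachelsz - 1 bytes.
 */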
/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
			  struct ieee80211_rx_status *rx_status,
			  bool *decrypt_error, struct ath_softc *sc)
{
	struct ieee80211_hdr *hdr;
	u8 ratecode;
	__le16 fc;
	struct ieee80211_hw *hw;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
	hw = ath_get_virt_hw(sc, hdr);

	if (ds->ds_rxstat.rs_more) {
		/*
		 * Frame spans multiple descriptors; this cannot happen yet
		 * as we don't support jumbograms. If not in monitor mode,
		 * discard the frame. Enable this if you want to see
		 * error frames in Monitor mode.
		 */
		if (sc->sc_ah->opmode != NL80211_IFTYPE_MONITOR)
			goto rx_next;
	} else if (ds->ds_rxstat.rs_status != 0) {
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
			goto rx_next;

		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
			else
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				goto rx_next;
		} else {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				goto rx_next;
			}
		}
	}

	ratecode = ds->ds_rxstat.rs_rate;

	if (ratecode & 0x80) {
		/* HT rate */
		rx_status->flag |= RX_FLAG_HT;
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
			rx_status->flag |= RX_FLAG_40MHZ;
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
			rx_status->flag |= RX_FLAG_SHORT_GI;
		rx_status->rate_idx = ratecode & 0x7f;
	} else {
		int i = 0, cur_band, n_rates;

		cur_band = hw->conf.channel->band;
		n_rates = sc->sbands[cur_band].n_bitrates;

		for (i = 0; i < n_rates; i++) {
			if (sc->sbands[cur_band].bitrates[i].hw_value ==
			    ratecode) {
				rx_status->rate_idx = i;
				break;
			}

			if (sc->sbands[cur_band].bitrates[i].hw_value_short ==
			    ratecode) {
				rx_status->rate_idx = i;
				rx_status->flag |= RX_FLAG_SHORTPRE;
				break;
			}
		}
	}

	rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->noise = sc->ani.noise_floor;
	rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi;
	rx_status->antenna = ds->ds_rxstat.rs_antenna;

	/* at 45 you will be able to use MCS 15 reliably. A more elaborate
	 * scheme can be used here but it requires tables of SNR/throughput
	 * for each possible mode used. */
	rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45;

	/* rssi can be more than 45 though, anything above that
	 * should be considered at 100% */
	if (rx_status->qual > 100)
		rx_status->qual = 100;

	rx_status->flag |= RX_FLAG_TSFT;

	return 1;
rx_next:
	return 0;
}
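/*
 * Rate decoding example for the logic above: a hardware rate code of 0x87
 * has bit 7 set, so the frame is reported as HT with rate_idx = 0x07
 * (MCS 7); legacy rate codes are instead looked up in the current band's
 * bitrate table by hw_value/hw_value_short.
 */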
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(sc);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_ah->macaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	do {
		spin_lock_init(&sc->rx.rxflushlock);
		sc->sc_flags &= ~SC_OP_RXFLUSH;
		spin_lock_init(&sc->rx.rxbuflock);

		sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					 min(sc->cachelsz, (u16)64));

		DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			sc->cachelsz, sc->rx.bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"failed to allocate rx descriptors: %d\n",
				error);
			break;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);
			if (skb == NULL) {
				error = -ENOMEM;
				break;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 sc->rx.bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				DPRINTF(sc, ATH_DBG_CONFIG,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				break;
			}
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->rx.rxlink = NULL;
	} while (0);

	if (error)
		ath_rx_cleanup(sc);

	return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb)
			dev_kfree_skb(skb);
	}

	if (sc->rx.rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */
u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/* Can't set HOSTAP into promiscuous mode */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) {
		rfilt |= ATH9K_RX_FILTER_PROM;
		/* ??? To prevent from sending ACK */
		rfilt &= ~ATH9K_RX_FILTER_UCAST;
	}

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/* If in HOSTAP mode, want to enable reception of PSPOLL frames */
	if (sc->sc_ah->opmode == NL80211_IFTYPE_AP)
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	return rfilt;

#undef RX_FILTER_PRESERVE
}
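/*
 * For example, a station interface that is neither promiscuous nor in
 * FIF_BCN_PRBRESP_PROMISC mode ends up with UCAST | BCAST | MCAST |
 * MYBEACON (plus whatever PHYERR/PHYRADAR bits were already set and
 * preserved), while an AP gets UCAST | BCAST | MCAST | PROBEREQ | BEACON |
 * PSPOLL.
 */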
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah);

	return 0;
}
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);
	sc->rx.rxlink = NULL;

	return stopped;
}
void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	ath_rx_tasklet(sc, 1);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}
int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)						\
	((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc +	\
			     ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
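	/* PA2DESC() maps a DMA (physical) address taken from a descriptor's
	 * ds_link field back to the corresponding virtual struct ath_desc
	 * pointer, using its offset from the start of the descriptor DMA
	 * region. */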
	struct ath_buf *bf;
	struct ath_desc *ds;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status rx_status;
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padsize, retval;
	bool decrypt_error = false;
	u8 keyix;

	spin_lock_bh(&sc->rx.rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		if (list_empty(&sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			break;
		}

		bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
		ds = bf->bf_desc;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah, ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
				sc->rx.rxlink = NULL;
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the
			 * current descriptor has been corrupted. Force s/w to
			 * discard this descriptor and continue...
			 */
			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
						     PA2DESC(sc, tds->ds_link),
						     0);
			if (retval == -EINPROGRESS) {
				break;
			}
		}

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Synchronize the DMA transfer with CPU before
		 * 1. accessing the frame
		 * 2. requeueing the same buffer to h/w
		 */
		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
					sc->rx.bufsize,
					DMA_FROM_DEVICE);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		if (!ds->ds_rxstat.rs_datalen)
			goto requeue;

		/* The status portion of the descriptor could get corrupted. */
		if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen)
			goto requeue;

		if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 sc->rx.bufsize,
				 DMA_FROM_DEVICE);

		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);

		/* see if any padding is done by the hw and remove it */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdrlen = ieee80211_get_hdrlen_from_skb(skb);

		/* The MAC header is padded to have 32-bit boundary if the
		 * packet payload is non-zero. The general calculation for
		 * padsize would take into account odd header lengths:
		 * padsize = (4 - hdrlen % 4) % 4; However, since only
		 * even-length headers are used, padding can only be 0 or 2
		 * bytes and we can optimize this a bit. In addition, we must
		 * not try to remove padding from short control frames that do
		 * not have payload. */
		padsize = hdrlen & 3;
		if (padsize && hdrlen >= 24) {
			memmove(skb->data + padsize, skb->data, hdrlen);
			skb_pull(skb, padsize);
		}
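		/* Example of the padding removal above: a QoS data frame has
		 * a 26-byte header, so hdrlen & 3 == 2 and the hardware has
		 * inserted 2 pad bytes between the header and the payload;
		 * memmove()+skb_pull() strip them so mac80211 sees a
		 * contiguous frame. A 10-byte ACK (hdrlen < 24) is left
		 * untouched. */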
		keyix = ds->ds_rxstat.rs_keyix;

		if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
			rx_status.flag |= RX_FLAG_DECRYPTED;
		} else if ((le16_to_cpu(hdr->frame_control) &
			    IEEE80211_FCTL_PROTECTED) &&
			   !decrypt_error && skb->len >= hdrlen + 4) {
			/* Hardware did not report a valid key index; recover
			 * it from the key ID bits of the IV in the frame. */
			keyix = skb->data[hdrlen + 3] >> 6;

			if (test_bit(keyix, sc->keymap))
				rx_status.flag |= RX_FLAG_DECRYPTED;
		}
		if (ah->sw_mgmt_crypto &&
		    (rx_status.flag & RX_FLAG_DECRYPTED) &&
		    ieee80211_is_mgmt(hdr->frame_control)) {
			/* Use software decrypt for management frames. */
			rx_status.flag &= ~RX_FLAG_DECRYPTED;
		}

		/* Send the frame to mac80211 */
		if (hdr->addr1[0] & 0x01) {
			int i;
			/*
			 * Deliver broadcast/multicast frames to all suitable
			 * virtual wiphys.
			 */
			/* TODO: filter based on channel configuration */
			for (i = 0; i < sc->num_sec_wiphy; i++) {
				struct ath_wiphy *aphy = sc->sec_wiphy[i];
				struct sk_buff *nskb;
				if (aphy == NULL)
					continue;
				nskb = skb_copy(skb, GFP_ATOMIC);
				if (nskb)
					__ieee80211_rx(aphy->hw, nskb,
						       &rx_status);
			}
			__ieee80211_rx(sc->hw, skb, &rx_status);
		} else {
			/* Deliver unicast frames based on receiver address */
			__ieee80211_rx(ath_get_virt_hw(sc, hdr), skb,
				       &rx_status);
		}

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 sc->rx.bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			DPRINTF(sc, ATH_DBG_CONFIG,
				"dma_mapping_error() on RX\n");
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		if (ieee80211_is_beacon(hdr->frame_control) &&
		    (sc->sc_flags & SC_OP_WAIT_FOR_BEACON)) {
			sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
			ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
		}

requeue:
		list_move_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_buf_link(sc, bf);
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
#undef PA2DESC
}