Commit | Line | Data |
---|---|---|
f078f209 | 1 | /* |
cee075a2 | 2 | * Copyright (c) 2008-2009 Atheros Communications Inc. |
f078f209 LR |
3 | * |
4 | * Permission to use, copy, modify, and/or distribute this software for any | |
5 | * purpose with or without fee is hereby granted, provided that the above | |
6 | * copyright notice and this permission notice appear in all copies. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
15 | */ | |
16 | ||
394cf0a1 | 17 | #include "ath9k.h" |
b622a720 | 18 | #include "ar9003_mac.h" |
f078f209 | 19 | |
b5c80475 FF |
20 | #define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) |
21 | ||
ededf1f8 VT |
22 | static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) |
23 | { | |
24 | return sc->ps_enabled && | |
25 | (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); | |
26 | } | |
27 | ||
bce048d7 JM |
28 | static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, |
29 | struct ieee80211_hdr *hdr) | |
30 | { | |
c52f33d0 JM |
31 | struct ieee80211_hw *hw = sc->pri_wiphy->hw; |
32 | int i; | |
33 | ||
34 | spin_lock_bh(&sc->wiphy_lock); | |
35 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
36 | struct ath_wiphy *aphy = sc->sec_wiphy[i]; | |
37 | if (aphy == NULL) | |
38 | continue; | |
39 | if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr) | |
40 | == 0) { | |
41 | hw = aphy->hw; | |
42 | break; | |
43 | } | |
44 | } | |
45 | spin_unlock_bh(&sc->wiphy_lock); | |
46 | return hw; | |
bce048d7 JM |
47 | } |
48 | ||
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	/* First buffer of a chain is handed straight to the hardware;
	 * later ones are linked from the previous descriptor. */
	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	/* Remember where to chain the next descriptor, then kick rx DMA. */
	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}
92 | ||
ff37e337 S |
93 | static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) |
94 | { | |
95 | /* XXX block beacon interrupts */ | |
96 | ath9k_hw_setantenna(sc->sc_ah, antenna); | |
b77f483f S |
97 | sc->rx.defant = antenna; |
98 | sc->rx.rxotherant = 0; | |
ff37e337 S |
99 | } |
100 | ||
/*
 * Program the operating-mode dependent PCU state: rx filter, BSSID
 * mask, opmode, MAC address and multicast filter.
 */
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, common->macaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0; /* all-ones: accept every multicast group */
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
126 | ||
/*
 * Move one free rx buffer from sc->rx.rxbuf into the hardware rx FIFO
 * of the given EDMA queue.
 *
 * Returns false when the FIFO is already at its configured depth.
 * NOTE: the free-list access is unchecked (list_first_entry), so the
 * caller must guarantee sc->rx.rxbuf is non-empty.
 */
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	/* Zero the rx status area so stale data can't be mistaken for a
	 * completed descriptor, and sync just that area to the device. */
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	/* Stash the owning ath_buf in the skb control block so the
	 * completion path can find it (see SKB_CB_ATHBUF). */
	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
155 | ||
156 | static void ath_rx_addbuffer_edma(struct ath_softc *sc, | |
157 | enum ath9k_rx_qtype qtype, int size) | |
158 | { | |
b5c80475 FF |
159 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
160 | u32 nbuf = 0; | |
161 | ||
b5c80475 FF |
162 | if (list_empty(&sc->rx.rxbuf)) { |
163 | ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n"); | |
164 | return; | |
797fe5cb | 165 | } |
f078f209 | 166 | |
b5c80475 FF |
167 | while (!list_empty(&sc->rx.rxbuf)) { |
168 | nbuf++; | |
169 | ||
170 | if (!ath_rx_edma_buf_link(sc, qtype)) | |
171 | break; | |
172 | ||
173 | if (nbuf >= size) | |
174 | break; | |
175 | } | |
176 | } | |
177 | ||
178 | static void ath_rx_remove_buffer(struct ath_softc *sc, | |
179 | enum ath9k_rx_qtype qtype) | |
180 | { | |
181 | struct ath_buf *bf; | |
182 | struct ath_rx_edma *rx_edma; | |
183 | struct sk_buff *skb; | |
184 | ||
185 | rx_edma = &sc->rx.rx_edma[qtype]; | |
186 | ||
187 | while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) { | |
188 | bf = SKB_CB_ATHBUF(skb); | |
189 | BUG_ON(!bf); | |
190 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
191 | } | |
192 | } | |
193 | ||
194 | static void ath_rx_edma_cleanup(struct ath_softc *sc) | |
195 | { | |
196 | struct ath_buf *bf; | |
197 | ||
198 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); | |
199 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); | |
200 | ||
797fe5cb | 201 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
b5c80475 FF |
202 | if (bf->bf_mpdu) |
203 | dev_kfree_skb_any(bf->bf_mpdu); | |
204 | } | |
205 | ||
206 | INIT_LIST_HEAD(&sc->rx.rxbuf); | |
207 | ||
208 | kfree(sc->rx.rx_bufptr); | |
209 | sc->rx.rx_bufptr = NULL; | |
210 | } | |
211 | ||
212 | static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) | |
213 | { | |
214 | skb_queue_head_init(&rx_edma->rx_fifo); | |
215 | skb_queue_head_init(&rx_edma->rx_buffers); | |
216 | rx_edma->rx_fifo_hwsize = size; | |
217 | } | |
218 | ||
/*
 * Allocate and DMA-map the receive buffers for the EDMA rx path and
 * initialize both the high- and low-priority rx queues.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * allocated state is torn down via ath_rx_edma_cleanup().
 */
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;


	/* Buffer must fit a maximal MPDU plus the rx status block,
	 * rounded up to the cache line size (capped at 64 bytes). */
	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
				     ah->caps.rx_status_len,
				     min(common->cachelsz, (u16)64));

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	/* One contiguous ath_buf array, owned via sc->rx.rx_bufptr. */
	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		/* BIDIRECTIONAL: the host writes the status area and the
		 * device writes the frame data into the same buffer. */
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
281 | ||
/*
 * Bring up EDMA reception: enable rx DMA, fill both rx FIFOs to their
 * hardware depth, reprogram mode-dependent PCU state and finally start
 * the PCU receive engine.
 */
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	spin_unlock_bh(&sc->rx.rxbuflock);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING));
}
300 | ||
/* Drain both EDMA rx FIFOs back onto the free buffer list; rxbuflock
 * serializes against buffer linking/unlinking paths. */
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	spin_unlock_bh(&sc->rx.rxbuflock);
}
308 | ||
/*
 * Allocate and initialize all rx state (locks, descriptors, buffers).
 * Dispatches to the EDMA variant on hardware with ATH9K_HW_CAP_EDMA.
 * Returns 0 or a negative errno; on error in the legacy path,
 * ath_rx_cleanup() undoes partial allocation.
 */
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		/* EDMA init performs its own cleanup on failure. */
		return ath_rx_edma_init(sc, nbufs);
	} else {
		/* Legacy path: descriptor ring plus one mapped skb per
		 * descriptor, cache-line aligned buffer size. */
		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
				min(common->cachelsz, (u16)64));

		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			  common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				"rx", nbufs, 1, 0);
		if (error != 0) {
			ath_print(common, ATH_DBG_FATAL,
				  "failed to allocate rx descriptors: %d\n",
				  error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					common->rx_bufsize,
					DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				ath_print(common, ATH_DBG_FATAL,
					  "dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
372 | ||
f078f209 LR |
373 | void ath_rx_cleanup(struct ath_softc *sc) |
374 | { | |
cc861f74 LR |
375 | struct ath_hw *ah = sc->sc_ah; |
376 | struct ath_common *common = ath9k_hw_common(ah); | |
f078f209 LR |
377 | struct sk_buff *skb; |
378 | struct ath_buf *bf; | |
379 | ||
b5c80475 FF |
380 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { |
381 | ath_rx_edma_cleanup(sc); | |
382 | return; | |
383 | } else { | |
384 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { | |
385 | skb = bf->bf_mpdu; | |
386 | if (skb) { | |
387 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | |
388 | common->rx_bufsize, | |
389 | DMA_FROM_DEVICE); | |
390 | dev_kfree_skb(skb); | |
391 | } | |
051b9191 | 392 | } |
f078f209 | 393 | |
b5c80475 FF |
394 | if (sc->rx.rxdma.dd_desc_len != 0) |
395 | ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); | |
396 | } | |
f078f209 LR |
397 | } |
398 | ||
399 | /* | |
400 | * Calculate the receive filter according to the | |
401 | * operating mode and state: | |
402 | * | |
403 | * o always accept unicast, broadcast, and multicast traffic | |
404 | * o maintain current state of phy error reception (the hal | |
405 | * may enable phy error frames for noise immunity work) | |
406 | * o probe request frames are accepted only when operating in | |
407 | * hostap, adhoc, or monitor modes | |
408 | * o enable promiscuous mode according to the interface state | |
409 | * o accept beacons: | |
410 | * - when operating in adhoc mode so the 802.11 layer creates | |
411 | * node table entries for peers, | |
412 | * - when operating in station mode for collecting rssi data when | |
413 | * the station is otherwise quiet, or | |
414 | * - when operating as a repeater so we see repeater-sta beacons | |
415 | * - when scanning | |
416 | */ | |
417 | ||
u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	/* Keep the PHY error/radar bits as currently programmed (they may
	 * have been enabled elsewhere, e.g. for noise immunity work) and
	 * always accept unicast, broadcast and multicast. */
	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	/* A STA not in beacon-promiscuous mode only needs beacons from
	 * its own BSS; every other case takes all beacons. */
	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/* PS-Poll reception: only on AR9280+/AR9285+ chips, in AP mode,
	 * and only when mac80211 requested it. */
	if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
	    AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	/* In HT configurations also accept compressed BAR frames. */
	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* TODO: only needed if more than one BSSID is in use in
		 * station/adhoc mode */
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}
473 | ||
/*
 * (Re)start the receive engine.  On EDMA hardware this delegates to
 * the dedicated start path; otherwise the legacy descriptor chain is
 * rebuilt from the free buffer list, the hardware is pointed at its
 * head, and the PCU is started.  Always returns 0.
 */
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	/* Relink every buffer; rxlink tracks the chain tail as we go. */
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING));

	return 0;
}
508 | ||
/*
 * Stop the receive engine: disable the PCU and clear the rx filter
 * before stopping rx DMA, then reset the software chain state.
 * Returns whether the DMA engine reported a clean stop.
 */
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	return stopped;
}
525 | ||
/*
 * Drain all pending rx frames.  SC_OP_RXFLUSH makes the rx tasklet
 * drop frames instead of delivering them; rxflushlock serializes
 * concurrent flushers.
 */
void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	/* EDMA hardware has a separate high-priority queue to drain. */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}
536 | ||
cc65965c JM |
537 | static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) |
538 | { | |
539 | /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ | |
540 | struct ieee80211_mgmt *mgmt; | |
541 | u8 *pos, *end, id, elen; | |
542 | struct ieee80211_tim_ie *tim; | |
543 | ||
544 | mgmt = (struct ieee80211_mgmt *)skb->data; | |
545 | pos = mgmt->u.beacon.variable; | |
546 | end = skb->data + skb->len; | |
547 | ||
548 | while (pos + 2 < end) { | |
549 | id = *pos++; | |
550 | elen = *pos++; | |
551 | if (pos + elen > end) | |
552 | break; | |
553 | ||
554 | if (id == WLAN_EID_TIM) { | |
555 | if (elen < sizeof(*tim)) | |
556 | break; | |
557 | tim = (struct ieee80211_tim_ie *) pos; | |
558 | if (tim->dtim_count != 0) | |
559 | break; | |
560 | return tim->bitmap_ctrl & 0x01; | |
561 | } | |
562 | ||
563 | pos += elen; | |
564 | } | |
565 | ||
566 | return false; | |
567 | } | |
568 | ||
/*
 * Power-save handling for a beacon from our current AP: clear the
 * beacon-wait flag, resync beacon timers when PS_BEACON_SYNC is set,
 * and decide whether to stay awake for buffered broadcast/multicast
 * (CAB) frames announced in the DTIM.
 */
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	/* Too short for a beacon's fixed fields plus a minimal IE. */
	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_print(common, ATH_DBG_PS,
			  "Reconfigure Beacon timers based on "
			  "timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
			  "buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "PS wait for CAB frames timed out\n");
	}
}
616 | ||
/*
 * Power-save receive hook: inspect a frame received while dozing (or
 * while auto-sleep is active) and update sc->ps_flags — beacon
 * processing, end-of-CAB detection, and PS-Poll response tracking.
 */
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		/* A unicast, non-fragmented frame closes out the PS-Poll
		 * exchange. */
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having received "
			  "PS-Poll data (0x%lx)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}
}
653 | ||
/*
 * Hand a completed rx frame to mac80211.  Broadcast/multicast frames
 * are copied to every registered secondary wiphy and the original skb
 * goes to the primary hw; unicast frames go only to @hw (resolved
 * from the receiver address by the caller).  Ownership of @skb passes
 * to mac80211 in all cases.
 */
static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
				    struct ath_softc *sc, struct sk_buff *skb,
				    struct ieee80211_rx_status *rxs)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;
			if (aphy == NULL)
				continue;
			/* Copy may fail under memory pressure; that wiphy
			 * simply misses this frame. */
			nskb = skb_copy(skb, GFP_ATOMIC);
			if (!nskb)
				continue;
			ieee80211_rx(aphy->hw, nskb);
		}
		ieee80211_rx(sc->hw, skb);
	} else
		/* Deliver unicast frames based on receiver address */
		ieee80211_rx(hw, skb);
}
685 | ||
/*
 * Try to complete the frame at the head of an EDMA rx FIFO.
 *
 * Returns false when the FIFO is empty or the head buffer is still
 * owned by the hardware (-EINPROGRESS); in the latter case the buffer
 * is synced back to the device.  On a corrupt descriptor (-EINVAL)
 * this buffer and the one following it are recycled back to the
 * hardware and true is returned so the caller keeps polling.
 * Otherwise the skb moves to the rx_buffers staging queue.
 */
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	/* Claim the buffer for CPU access before reading the status. */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}
f078f209 | 735 | |
/*
 * Return the next fully-received EDMA buffer, after first draining
 * every completed frame from the hardware FIFO into the staging
 * queue.  The rx status is parsed into @rs.  Returns NULL when
 * nothing is pending.
 */
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	/* Harvest everything the hardware has finished with. */
	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	/* Re-parse the descriptor, this time filling in @rs. */
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}
753 | ||
/*
 * Return the next completed buffer from the legacy rx descriptor
 * chain, or NULL if the hardware has not finished one yet.  Includes
 * a workaround for descriptors whose status words were corrupted by
 * the hardware (see inline comment below).
 */
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
			common->rx_bufsize,
			DMA_FROM_DEVICE);

	return bf;
}
827 | ||
/* Assumes you've already done the endian to CPU conversion */
/*
 * Validate a frame's rx status and translate hardware error bits into
 * mac80211 rx flags.  Returns false when the frame must be dropped.
 * *decrypt_error is set (instead of dropping) on decryption failures
 * so the caller can still deliver the frame for key-handling purposes.
 */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	/* Zero-length payload: nothing useful, drop. */
	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/*
	 * rs_more indicates chained descriptors which can be used
	 * to link buffers together for a sort of scatter-gather
	 * operation.
	 * reject the frame, we don't support scatter-gather yet and
	 * the frame is probably corrupt anyway
	 */
	if (rx_stats->rs_more)
		return false;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
			else
				rxs->flag |= RX_FLAG_MMIC_ERROR;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}
905 | ||
906 | static int ath9k_process_rate(struct ath_common *common, | |
907 | struct ieee80211_hw *hw, | |
908 | struct ath_rx_status *rx_stats, | |
9f167f64 | 909 | struct ieee80211_rx_status *rxs) |
d435700f S |
910 | { |
911 | struct ieee80211_supported_band *sband; | |
912 | enum ieee80211_band band; | |
913 | unsigned int i = 0; | |
914 | ||
915 | band = hw->conf.channel->band; | |
916 | sband = hw->wiphy->bands[band]; | |
917 | ||
918 | if (rx_stats->rs_rate & 0x80) { | |
919 | /* HT rate */ | |
920 | rxs->flag |= RX_FLAG_HT; | |
921 | if (rx_stats->rs_flags & ATH9K_RX_2040) | |
922 | rxs->flag |= RX_FLAG_40MHZ; | |
923 | if (rx_stats->rs_flags & ATH9K_RX_GI) | |
924 | rxs->flag |= RX_FLAG_SHORT_GI; | |
925 | rxs->rate_idx = rx_stats->rs_rate & 0x7f; | |
926 | return 0; | |
927 | } | |
928 | ||
929 | for (i = 0; i < sband->n_bitrates; i++) { | |
930 | if (sband->bitrates[i].hw_value == rx_stats->rs_rate) { | |
931 | rxs->rate_idx = i; | |
932 | return 0; | |
933 | } | |
934 | if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { | |
935 | rxs->flag |= RX_FLAG_SHORTPRE; | |
936 | rxs->rate_idx = i; | |
937 | return 0; | |
938 | } | |
939 | } | |
940 | ||
941 | /* | |
942 | * No valid hardware bitrate found -- we should not get here | |
943 | * because hardware has already validated this frame as OK. | |
944 | */ | |
945 | ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected " | |
946 | "0x%02x using 1 Mbit\n", rx_stats->rs_rate); | |
d435700f S |
947 | |
948 | return -EINVAL; | |
949 | } | |
950 | ||
951 | static void ath9k_process_rssi(struct ath_common *common, | |
952 | struct ieee80211_hw *hw, | |
9f167f64 | 953 | struct ieee80211_hdr *hdr, |
d435700f S |
954 | struct ath_rx_status *rx_stats) |
955 | { | |
956 | struct ath_hw *ah = common->ah; | |
957 | struct ieee80211_sta *sta; | |
d435700f S |
958 | struct ath_node *an; |
959 | int last_rssi = ATH_RSSI_DUMMY_MARKER; | |
960 | __le16 fc; | |
961 | ||
d435700f S |
962 | fc = hdr->frame_control; |
963 | ||
964 | rcu_read_lock(); | |
965 | /* | |
966 | * XXX: use ieee80211_find_sta! This requires quite a bit of work | |
967 | * under the current ath9k virtual wiphy implementation as we have | |
968 | * no way of tying a vif to wiphy. Typically vifs are attached to | |
969 | * at least one sdata of a wiphy on mac80211 but with ath9k virtual | |
970 | * wiphy you'd have to iterate over every wiphy and each sdata. | |
971 | */ | |
972 | sta = ieee80211_find_sta_by_hw(hw, hdr->addr2); | |
973 | if (sta) { | |
974 | an = (struct ath_node *) sta->drv_priv; | |
975 | if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && | |
976 | !rx_stats->rs_moreaggr) | |
977 | ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi); | |
978 | last_rssi = an->last_rssi; | |
979 | } | |
980 | rcu_read_unlock(); | |
981 | ||
982 | if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) | |
983 | rx_stats->rs_rssi = ATH_EP_RND(last_rssi, | |
984 | ATH_RSSI_EP_MULTIPLIER); | |
985 | if (rx_stats->rs_rssi < 0) | |
986 | rx_stats->rs_rssi = 0; | |
987 | ||
988 | /* Update Beacon RSSI, this is used by ANI. */ | |
989 | if (ieee80211_is_beacon(fc)) | |
990 | ah->stats.avgbrssi = rx_stats->rs_rssi; | |
991 | } | |
992 | ||
993 | /* | |
994 | * For Decrypt or Demic errors, we only mark packet status here and always push | |
995 | * up the frame up to let mac80211 handle the actual error case, be it no | |
996 | * decryption key or real decryption error. This let us keep statistics there. | |
997 | */ | |
998 | static int ath9k_rx_skb_preprocess(struct ath_common *common, | |
999 | struct ieee80211_hw *hw, | |
9f167f64 | 1000 | struct ieee80211_hdr *hdr, |
d435700f S |
1001 | struct ath_rx_status *rx_stats, |
1002 | struct ieee80211_rx_status *rx_status, | |
1003 | bool *decrypt_error) | |
1004 | { | |
1005 | struct ath_hw *ah = common->ah; | |
1006 | ||
1007 | memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); | |
1008 | ||
1009 | /* | |
1010 | * everything but the rate is checked here, the rate check is done | |
1011 | * separately to avoid doing two lookups for a rate for each frame. | |
1012 | */ | |
9f167f64 | 1013 | if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) |
d435700f S |
1014 | return -EINVAL; |
1015 | ||
9f167f64 | 1016 | ath9k_process_rssi(common, hw, hdr, rx_stats); |
d435700f | 1017 | |
9f167f64 | 1018 | if (ath9k_process_rate(common, hw, rx_stats, rx_status)) |
d435700f S |
1019 | return -EINVAL; |
1020 | ||
1021 | rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp); | |
1022 | rx_status->band = hw->conf.channel->band; | |
1023 | rx_status->freq = hw->conf.channel->center_freq; | |
1024 | rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; | |
1025 | rx_status->antenna = rx_stats->rs_antenna; | |
1026 | rx_status->flag |= RX_FLAG_TSFT; | |
1027 | ||
1028 | return 0; | |
1029 | } | |
1030 | ||
1031 | static void ath9k_rx_skb_postprocess(struct ath_common *common, | |
1032 | struct sk_buff *skb, | |
1033 | struct ath_rx_status *rx_stats, | |
1034 | struct ieee80211_rx_status *rxs, | |
1035 | bool decrypt_error) | |
1036 | { | |
1037 | struct ath_hw *ah = common->ah; | |
1038 | struct ieee80211_hdr *hdr; | |
1039 | int hdrlen, padpos, padsize; | |
1040 | u8 keyix; | |
1041 | __le16 fc; | |
1042 | ||
1043 | /* see if any padding is done by the hw and remove it */ | |
1044 | hdr = (struct ieee80211_hdr *) skb->data; | |
1045 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | |
1046 | fc = hdr->frame_control; | |
1047 | padpos = ath9k_cmn_padpos(hdr->frame_control); | |
1048 | ||
1049 | /* The MAC header is padded to have 32-bit boundary if the | |
1050 | * packet payload is non-zero. The general calculation for | |
1051 | * padsize would take into account odd header lengths: | |
1052 | * padsize = (4 - padpos % 4) % 4; However, since only | |
1053 | * even-length headers are used, padding can only be 0 or 2 | |
1054 | * bytes and we can optimize this a bit. In addition, we must | |
1055 | * not try to remove padding from short control frames that do | |
1056 | * not have payload. */ | |
1057 | padsize = padpos & 3; | |
1058 | if (padsize && skb->len>=padpos+padsize+FCS_LEN) { | |
1059 | memmove(skb->data + padsize, skb->data, padpos); | |
1060 | skb_pull(skb, padsize); | |
1061 | } | |
1062 | ||
1063 | keyix = rx_stats->rs_keyix; | |
1064 | ||
1065 | if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && | |
1066 | ieee80211_has_protected(fc)) { | |
1067 | rxs->flag |= RX_FLAG_DECRYPTED; | |
1068 | } else if (ieee80211_has_protected(fc) | |
1069 | && !decrypt_error && skb->len >= hdrlen + 4) { | |
1070 | keyix = skb->data[hdrlen + 3] >> 6; | |
1071 | ||
1072 | if (test_bit(keyix, common->keymap)) | |
1073 | rxs->flag |= RX_FLAG_DECRYPTED; | |
1074 | } | |
1075 | if (ah->sw_mgmt_crypto && | |
1076 | (rxs->flag & RX_FLAG_DECRYPTED) && | |
1077 | ieee80211_is_mgmt(fc)) | |
1078 | /* Use software decrypt for management frames. */ | |
1079 | rxs->flag &= ~RX_FLAG_DECRYPTED; | |
1080 | } | |
b5c80475 FF |
1081 | |
1082 | int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) | |
1083 | { | |
1084 | struct ath_buf *bf; | |
cb71d9ba | 1085 | struct sk_buff *skb = NULL, *requeue_skb; |
5ca42627 | 1086 | struct ieee80211_rx_status *rxs; |
cbe61d8a | 1087 | struct ath_hw *ah = sc->sc_ah; |
27c51f1a | 1088 | struct ath_common *common = ath9k_hw_common(ah); |
b4afffc0 LR |
1089 | /* |
1090 | * The hw can techncically differ from common->hw when using ath9k | |
1091 | * virtual wiphy so to account for that we iterate over the active | |
1092 | * wiphys and find the appropriate wiphy and therefore hw. | |
1093 | */ | |
1094 | struct ieee80211_hw *hw = NULL; | |
be0418ad | 1095 | struct ieee80211_hdr *hdr; |
c9b14170 | 1096 | int retval; |
be0418ad | 1097 | bool decrypt_error = false; |
29bffa96 | 1098 | struct ath_rx_status rs; |
b5c80475 FF |
1099 | enum ath9k_rx_qtype qtype; |
1100 | bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); | |
1101 | int dma_type; | |
5c6dd921 | 1102 | u8 rx_status_len = ah->caps.rx_status_len; |
be0418ad | 1103 | |
b5c80475 | 1104 | if (edma) |
b5c80475 | 1105 | dma_type = DMA_BIDIRECTIONAL; |
56824223 ML |
1106 | else |
1107 | dma_type = DMA_FROM_DEVICE; | |
b5c80475 FF |
1108 | |
1109 | qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; | |
b77f483f | 1110 | spin_lock_bh(&sc->rx.rxbuflock); |
f078f209 LR |
1111 | |
1112 | do { | |
1113 | /* If handling rx interrupt and flush is in progress => exit */ | |
98deeea0 | 1114 | if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) |
f078f209 LR |
1115 | break; |
1116 | ||
29bffa96 | 1117 | memset(&rs, 0, sizeof(rs)); |
b5c80475 FF |
1118 | if (edma) |
1119 | bf = ath_edma_get_next_rx_buf(sc, &rs, qtype); | |
1120 | else | |
1121 | bf = ath_get_next_rx_buf(sc, &rs); | |
f078f209 | 1122 | |
b5c80475 FF |
1123 | if (!bf) |
1124 | break; | |
f078f209 | 1125 | |
f078f209 | 1126 | skb = bf->bf_mpdu; |
be0418ad | 1127 | if (!skb) |
f078f209 | 1128 | continue; |
f078f209 | 1129 | |
5c6dd921 | 1130 | hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len); |
5ca42627 LR |
1131 | rxs = IEEE80211_SKB_RXCB(skb); |
1132 | ||
b4afffc0 LR |
1133 | hw = ath_get_virt_hw(sc, hdr); |
1134 | ||
29bffa96 | 1135 | ath_debug_stat_rx(sc, &rs); |
1395d3f0 | 1136 | |
f078f209 | 1137 | /* |
be0418ad S |
1138 | * If we're asked to flush receive queue, directly |
1139 | * chain it back at the queue without processing it. | |
f078f209 | 1140 | */ |
be0418ad | 1141 | if (flush) |
cb71d9ba | 1142 | goto requeue; |
f078f209 | 1143 | |
9f167f64 | 1144 | retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, |
d435700f | 1145 | rxs, &decrypt_error); |
1e875e9f | 1146 | if (retval) |
cb71d9ba LR |
1147 | goto requeue; |
1148 | ||
1149 | /* Ensure we always have an skb to requeue once we are done | |
1150 | * processing the current buffer's skb */ | |
cc861f74 | 1151 | requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); |
cb71d9ba LR |
1152 | |
1153 | /* If there is no memory we ignore the current RX'd frame, | |
1154 | * tell hardware it can give us a new frame using the old | |
b77f483f | 1155 | * skb and put it at the tail of the sc->rx.rxbuf list for |
cb71d9ba LR |
1156 | * processing. */ |
1157 | if (!requeue_skb) | |
1158 | goto requeue; | |
f078f209 | 1159 | |
9bf9fca8 | 1160 | /* Unmap the frame */ |
7da3c55c | 1161 | dma_unmap_single(sc->dev, bf->bf_buf_addr, |
cc861f74 | 1162 | common->rx_bufsize, |
b5c80475 | 1163 | dma_type); |
f078f209 | 1164 | |
b5c80475 FF |
1165 | skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len); |
1166 | if (ah->caps.rx_status_len) | |
1167 | skb_pull(skb, ah->caps.rx_status_len); | |
be0418ad | 1168 | |
d435700f S |
1169 | ath9k_rx_skb_postprocess(common, skb, &rs, |
1170 | rxs, decrypt_error); | |
be0418ad | 1171 | |
cb71d9ba LR |
1172 | /* We will now give hardware our shiny new allocated skb */ |
1173 | bf->bf_mpdu = requeue_skb; | |
7da3c55c | 1174 | bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data, |
cc861f74 | 1175 | common->rx_bufsize, |
b5c80475 | 1176 | dma_type); |
7da3c55c | 1177 | if (unlikely(dma_mapping_error(sc->dev, |
f8316df1 LR |
1178 | bf->bf_buf_addr))) { |
1179 | dev_kfree_skb_any(requeue_skb); | |
1180 | bf->bf_mpdu = NULL; | |
c46917bb LR |
1181 | ath_print(common, ATH_DBG_FATAL, |
1182 | "dma_mapping_error() on RX\n"); | |
5ca42627 | 1183 | ath_rx_send_to_mac80211(hw, sc, skb, rxs); |
f8316df1 LR |
1184 | break; |
1185 | } | |
cb71d9ba | 1186 | bf->bf_dmacontext = bf->bf_buf_addr; |
f078f209 LR |
1187 | |
1188 | /* | |
1189 | * change the default rx antenna if rx diversity chooses the | |
1190 | * other antenna 3 times in a row. | |
1191 | */ | |
29bffa96 | 1192 | if (sc->rx.defant != rs.rs_antenna) { |
b77f483f | 1193 | if (++sc->rx.rxotherant >= 3) |
29bffa96 | 1194 | ath_setdefantenna(sc, rs.rs_antenna); |
f078f209 | 1195 | } else { |
b77f483f | 1196 | sc->rx.rxotherant = 0; |
f078f209 | 1197 | } |
3cbb5dd7 | 1198 | |
ededf1f8 VT |
1199 | if (unlikely(ath9k_check_auto_sleep(sc) || |
1200 | (sc->ps_flags & (PS_WAIT_FOR_BEACON | | |
1201 | PS_WAIT_FOR_CAB | | |
1202 | PS_WAIT_FOR_PSPOLL_DATA)))) | |
cc65965c JM |
1203 | ath_rx_ps(sc, skb); |
1204 | ||
5ca42627 | 1205 | ath_rx_send_to_mac80211(hw, sc, skb, rxs); |
cc65965c | 1206 | |
cb71d9ba | 1207 | requeue: |
b5c80475 FF |
1208 | if (edma) { |
1209 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
1210 | ath_rx_edma_buf_link(sc, qtype); | |
1211 | } else { | |
1212 | list_move_tail(&bf->list, &sc->rx.rxbuf); | |
1213 | ath_rx_buf_link(sc, bf); | |
1214 | } | |
be0418ad S |
1215 | } while (1); |
1216 | ||
b77f483f | 1217 | spin_unlock_bh(&sc->rx.rxbuflock); |
f078f209 LR |
1218 | |
1219 | return 0; | |
f078f209 | 1220 | } |