/*
 * Copyright (c) 2010 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
23 static const int subtype_txq_to_hwq
[] = {
24 [WME_AC_BE
] = ATH_TXQ_AC_BE
,
25 [WME_AC_BK
] = ATH_TXQ_AC_BK
,
26 [WME_AC_VI
] = ATH_TXQ_AC_VI
,
27 [WME_AC_VO
] = ATH_TXQ_AC_VO
,
/*
 * Fill a local "struct ath9k_tx_queue_info qi" with default TX queue
 * parameters for the given WME subtype. Expects "qi" to be in scope
 * at the expansion site.
 */
#define ATH9K_HTC_INIT_TXQ(subtype) do {			\
		qi.tqi_subtype = subtype_txq_to_hwq[subtype];	\
		qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_physCompBuf = 0;				\
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |	\
			TXQ_FLAG_TXDESCINT_ENABLE;		\
	} while (0)
40 int get_hw_qnum(u16 queue
, int *hwq_map
)
44 return hwq_map
[WME_AC_VO
];
46 return hwq_map
[WME_AC_VI
];
48 return hwq_map
[WME_AC_BE
];
50 return hwq_map
[WME_AC_BK
];
52 return hwq_map
[WME_AC_BE
];
56 int ath_htc_txq_update(struct ath9k_htc_priv
*priv
, int qnum
,
57 struct ath9k_tx_queue_info
*qinfo
)
59 struct ath_hw
*ah
= priv
->ah
;
61 struct ath9k_tx_queue_info qi
;
63 ath9k_hw_get_txq_props(ah
, qnum
, &qi
);
65 qi
.tqi_aifs
= qinfo
->tqi_aifs
;
66 qi
.tqi_cwmin
= qinfo
->tqi_cwmin
/ 2; /* XXX */
67 qi
.tqi_cwmax
= qinfo
->tqi_cwmax
;
68 qi
.tqi_burstTime
= qinfo
->tqi_burstTime
;
69 qi
.tqi_readyTime
= qinfo
->tqi_readyTime
;
71 if (!ath9k_hw_set_txq_props(ah
, qnum
, &qi
)) {
72 ath_err(ath9k_hw_common(ah
),
73 "Unable to update hardware queue %u!\n", qnum
);
76 ath9k_hw_resettxqueue(ah
, qnum
);
82 int ath9k_htc_tx_start(struct ath9k_htc_priv
*priv
, struct sk_buff
*skb
)
84 struct ieee80211_hdr
*hdr
;
85 struct ieee80211_tx_info
*tx_info
= IEEE80211_SKB_CB(skb
);
86 struct ieee80211_sta
*sta
= tx_info
->control
.sta
;
87 struct ath9k_htc_sta
*ista
;
88 struct ath9k_htc_tx_ctl tx_ctl
;
89 enum htc_endpoint_id epid
;
95 hdr
= (struct ieee80211_hdr
*) skb
->data
;
96 fc
= hdr
->frame_control
;
98 if (tx_info
->control
.vif
&&
99 (struct ath9k_htc_vif
*) tx_info
->control
.vif
->drv_priv
)
100 vif_idx
= ((struct ath9k_htc_vif
*)
101 tx_info
->control
.vif
->drv_priv
)->index
;
103 vif_idx
= priv
->nvifs
;
106 ista
= (struct ath9k_htc_sta
*) sta
->drv_priv
;
107 sta_idx
= ista
->index
;
112 memset(&tx_ctl
, 0, sizeof(struct ath9k_htc_tx_ctl
));
114 if (ieee80211_is_data(fc
)) {
115 struct tx_frame_hdr tx_hdr
;
119 memset(&tx_hdr
, 0, sizeof(struct tx_frame_hdr
));
121 tx_hdr
.node_idx
= sta_idx
;
122 tx_hdr
.vif_idx
= vif_idx
;
124 if (tx_info
->flags
& IEEE80211_TX_CTL_AMPDU
) {
125 tx_ctl
.type
= ATH9K_HTC_AMPDU
;
126 tx_hdr
.data_type
= ATH9K_HTC_AMPDU
;
128 tx_ctl
.type
= ATH9K_HTC_NORMAL
;
129 tx_hdr
.data_type
= ATH9K_HTC_NORMAL
;
132 if (ieee80211_is_data_qos(fc
)) {
133 qc
= ieee80211_get_qos_ctl(hdr
);
134 tx_hdr
.tidno
= qc
[0] & IEEE80211_QOS_CTL_TID_MASK
;
137 /* Check for RTS protection */
138 if (priv
->hw
->wiphy
->rts_threshold
!= (u32
) -1)
139 if (skb
->len
> priv
->hw
->wiphy
->rts_threshold
)
140 flags
|= ATH9K_HTC_TX_RTSCTS
;
143 if (!(flags
& ATH9K_HTC_TX_RTSCTS
) &&
144 (priv
->op_flags
& OP_PROTECT_ENABLE
))
145 flags
|= ATH9K_HTC_TX_CTSONLY
;
147 tx_hdr
.flags
= cpu_to_be32(flags
);
148 tx_hdr
.key_type
= ath9k_cmn_get_hw_crypto_keytype(skb
);
149 if (tx_hdr
.key_type
== ATH9K_KEY_TYPE_CLEAR
)
150 tx_hdr
.keyix
= (u8
) ATH9K_TXKEYIX_INVALID
;
152 tx_hdr
.keyix
= tx_info
->control
.hw_key
->hw_key_idx
;
154 tx_fhdr
= skb_push(skb
, sizeof(tx_hdr
));
155 memcpy(tx_fhdr
, (u8
*) &tx_hdr
, sizeof(tx_hdr
));
157 qnum
= skb_get_queue_mapping(skb
);
161 TX_QSTAT_INC(WME_AC_VO
);
162 epid
= priv
->data_vo_ep
;
165 TX_QSTAT_INC(WME_AC_VI
);
166 epid
= priv
->data_vi_ep
;
169 TX_QSTAT_INC(WME_AC_BE
);
170 epid
= priv
->data_be_ep
;
174 TX_QSTAT_INC(WME_AC_BK
);
175 epid
= priv
->data_bk_ep
;
179 struct tx_mgmt_hdr mgmt_hdr
;
181 memset(&mgmt_hdr
, 0, sizeof(struct tx_mgmt_hdr
));
183 tx_ctl
.type
= ATH9K_HTC_NORMAL
;
185 mgmt_hdr
.node_idx
= sta_idx
;
186 mgmt_hdr
.vif_idx
= vif_idx
;
190 mgmt_hdr
.key_type
= ath9k_cmn_get_hw_crypto_keytype(skb
);
191 if (mgmt_hdr
.key_type
== ATH9K_KEY_TYPE_CLEAR
)
192 mgmt_hdr
.keyix
= (u8
) ATH9K_TXKEYIX_INVALID
;
194 mgmt_hdr
.keyix
= tx_info
->control
.hw_key
->hw_key_idx
;
196 tx_fhdr
= skb_push(skb
, sizeof(mgmt_hdr
));
197 memcpy(tx_fhdr
, (u8
*) &mgmt_hdr
, sizeof(mgmt_hdr
));
198 epid
= priv
->mgmt_ep
;
201 return htc_send(priv
->htc
, skb
, epid
, &tx_ctl
);
204 static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv
*priv
,
205 struct ath9k_htc_sta
*ista
, u8 tid
)
209 spin_lock_bh(&priv
->tx_lock
);
210 if ((tid
< ATH9K_HTC_MAX_TID
) && (ista
->tid_state
[tid
] == AGGR_STOP
))
212 spin_unlock_bh(&priv
->tx_lock
);
217 void ath9k_tx_tasklet(unsigned long data
)
219 struct ath9k_htc_priv
*priv
= (struct ath9k_htc_priv
*)data
;
220 struct ieee80211_sta
*sta
;
221 struct ieee80211_hdr
*hdr
;
222 struct ieee80211_tx_info
*tx_info
;
223 struct sk_buff
*skb
= NULL
;
226 while ((skb
= skb_dequeue(&priv
->tx_queue
)) != NULL
) {
228 hdr
= (struct ieee80211_hdr
*) skb
->data
;
229 fc
= hdr
->frame_control
;
230 tx_info
= IEEE80211_SKB_CB(skb
);
232 memset(&tx_info
->status
, 0, sizeof(tx_info
->status
));
236 sta
= ieee80211_find_sta(priv
->vif
, hdr
->addr1
);
239 ieee80211_tx_status(priv
->hw
, skb
);
243 /* Check if we need to start aggregation */
245 if (sta
&& conf_is_ht(&priv
->hw
->conf
) &&
246 !(skb
->protocol
== cpu_to_be16(ETH_P_PAE
))) {
247 if (ieee80211_is_data_qos(fc
)) {
249 struct ath9k_htc_sta
*ista
;
251 qc
= ieee80211_get_qos_ctl(hdr
);
253 ista
= (struct ath9k_htc_sta
*)sta
->drv_priv
;
255 if (ath9k_htc_check_tx_aggr(priv
, ista
, tid
)) {
256 ieee80211_start_tx_ba_session(sta
, tid
, 0);
257 spin_lock_bh(&priv
->tx_lock
);
258 ista
->tid_state
[tid
] = AGGR_PROGRESS
;
259 spin_unlock_bh(&priv
->tx_lock
);
266 /* Send status to mac80211 */
267 ieee80211_tx_status(priv
->hw
, skb
);
270 /* Wake TX queues if needed */
271 spin_lock_bh(&priv
->tx_lock
);
272 if (priv
->tx_queues_stop
) {
273 priv
->tx_queues_stop
= false;
274 spin_unlock_bh(&priv
->tx_lock
);
275 ath_dbg(ath9k_hw_common(priv
->ah
), ATH_DBG_XMIT
,
276 "Waking up TX queues\n");
277 ieee80211_wake_queues(priv
->hw
);
280 spin_unlock_bh(&priv
->tx_lock
);
283 void ath9k_htc_txep(void *drv_priv
, struct sk_buff
*skb
,
284 enum htc_endpoint_id ep_id
, bool txok
)
286 struct ath9k_htc_priv
*priv
= (struct ath9k_htc_priv
*) drv_priv
;
287 struct ath_common
*common
= ath9k_hw_common(priv
->ah
);
288 struct ieee80211_tx_info
*tx_info
;
293 if (ep_id
== priv
->mgmt_ep
) {
294 skb_pull(skb
, sizeof(struct tx_mgmt_hdr
));
295 } else if ((ep_id
== priv
->data_bk_ep
) ||
296 (ep_id
== priv
->data_be_ep
) ||
297 (ep_id
== priv
->data_vi_ep
) ||
298 (ep_id
== priv
->data_vo_ep
)) {
299 skb_pull(skb
, sizeof(struct tx_frame_hdr
));
301 ath_err(common
, "Unsupported TX EPID: %d\n", ep_id
);
302 dev_kfree_skb_any(skb
);
306 tx_info
= IEEE80211_SKB_CB(skb
);
309 tx_info
->flags
|= IEEE80211_TX_STAT_ACK
;
311 skb_queue_tail(&priv
->tx_queue
, skb
);
312 tasklet_schedule(&priv
->tx_tasklet
);
315 int ath9k_tx_init(struct ath9k_htc_priv
*priv
)
317 skb_queue_head_init(&priv
->tx_queue
);
/* TX teardown hook; nothing to release at present. */
void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
{

}
326 bool ath9k_htc_txq_setup(struct ath9k_htc_priv
*priv
, int subtype
)
328 struct ath_hw
*ah
= priv
->ah
;
329 struct ath_common
*common
= ath9k_hw_common(ah
);
330 struct ath9k_tx_queue_info qi
;
333 memset(&qi
, 0, sizeof(qi
));
334 ATH9K_HTC_INIT_TXQ(subtype
);
336 qnum
= ath9k_hw_setuptxqueue(priv
->ah
, ATH9K_TX_QUEUE_DATA
, &qi
);
340 if (qnum
>= ARRAY_SIZE(priv
->hwq_map
)) {
341 ath_err(common
, "qnum %u out of range, max %zu!\n",
342 qnum
, ARRAY_SIZE(priv
->hwq_map
));
343 ath9k_hw_releasetxqueue(ah
, qnum
);
347 priv
->hwq_map
[subtype
] = qnum
;
351 int ath9k_htc_cabq_setup(struct ath9k_htc_priv
*priv
)
353 struct ath9k_tx_queue_info qi
;
355 memset(&qi
, 0, sizeof(qi
));
356 ATH9K_HTC_INIT_TXQ(0);
358 return ath9k_hw_setuptxqueue(priv
->ah
, ATH9K_TX_QUEUE_CAB
, &qi
);
366 * Calculate the RX filter to be set in the HW.
368 u32
ath9k_htc_calcrxfilter(struct ath9k_htc_priv
*priv
)
370 #define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
372 struct ath_hw
*ah
= priv
->ah
;
375 rfilt
= (ath9k_hw_getrxfilter(ah
) & RX_FILTER_PRESERVE
)
376 | ATH9K_RX_FILTER_UCAST
| ATH9K_RX_FILTER_BCAST
377 | ATH9K_RX_FILTER_MCAST
;
379 if (priv
->rxfilter
& FIF_PROBE_REQ
)
380 rfilt
|= ATH9K_RX_FILTER_PROBEREQ
;
383 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
384 * mode interface or when in monitor mode. AP mode does not need this
385 * since it receives all in-BSS frames anyway.
387 if (((ah
->opmode
!= NL80211_IFTYPE_AP
) &&
388 (priv
->rxfilter
& FIF_PROMISC_IN_BSS
)) ||
389 (ah
->opmode
== NL80211_IFTYPE_MONITOR
))
390 rfilt
|= ATH9K_RX_FILTER_PROM
;
392 if (priv
->rxfilter
& FIF_CONTROL
)
393 rfilt
|= ATH9K_RX_FILTER_CONTROL
;
395 if ((ah
->opmode
== NL80211_IFTYPE_STATION
) &&
396 !(priv
->rxfilter
& FIF_BCN_PRBRESP_PROMISC
))
397 rfilt
|= ATH9K_RX_FILTER_MYBEACON
;
399 rfilt
|= ATH9K_RX_FILTER_BEACON
;
401 if (conf_is_ht(&priv
->hw
->conf
))
402 rfilt
|= ATH9K_RX_FILTER_COMP_BAR
;
406 #undef RX_FILTER_PRESERVE
410 * Recv initialization for opmode change.
412 static void ath9k_htc_opmode_init(struct ath9k_htc_priv
*priv
)
414 struct ath_hw
*ah
= priv
->ah
;
415 struct ath_common
*common
= ath9k_hw_common(ah
);
419 /* configure rx filter */
420 rfilt
= ath9k_htc_calcrxfilter(priv
);
421 ath9k_hw_setrxfilter(ah
, rfilt
);
423 /* configure bssid mask */
424 ath_hw_setbssidmask(common
);
426 /* configure operational mode */
427 ath9k_hw_setopmode(ah
);
429 /* calculate and install multicast filter */
430 mfilt
[0] = mfilt
[1] = ~0;
431 ath9k_hw_setmcastfilter(ah
, mfilt
[0], mfilt
[1]);
434 void ath9k_host_rx_init(struct ath9k_htc_priv
*priv
)
436 ath9k_hw_rxena(priv
->ah
);
437 ath9k_htc_opmode_init(priv
);
438 ath9k_hw_startpcureceive(priv
->ah
, (priv
->op_flags
& OP_SCANNING
));
439 priv
->rx
.last_rssi
= ATH_RSSI_DUMMY_MARKER
;
442 static void ath9k_process_rate(struct ieee80211_hw
*hw
,
443 struct ieee80211_rx_status
*rxs
,
444 u8 rx_rate
, u8 rs_flags
)
446 struct ieee80211_supported_band
*sband
;
447 enum ieee80211_band band
;
450 if (rx_rate
& 0x80) {
452 rxs
->flag
|= RX_FLAG_HT
;
453 if (rs_flags
& ATH9K_RX_2040
)
454 rxs
->flag
|= RX_FLAG_40MHZ
;
455 if (rs_flags
& ATH9K_RX_GI
)
456 rxs
->flag
|= RX_FLAG_SHORT_GI
;
457 rxs
->rate_idx
= rx_rate
& 0x7f;
461 band
= hw
->conf
.channel
->band
;
462 sband
= hw
->wiphy
->bands
[band
];
464 for (i
= 0; i
< sband
->n_bitrates
; i
++) {
465 if (sband
->bitrates
[i
].hw_value
== rx_rate
) {
469 if (sband
->bitrates
[i
].hw_value_short
== rx_rate
) {
471 rxs
->flag
|= RX_FLAG_SHORTPRE
;
478 static bool ath9k_rx_prepare(struct ath9k_htc_priv
*priv
,
479 struct ath9k_htc_rxbuf
*rxbuf
,
480 struct ieee80211_rx_status
*rx_status
)
483 struct ieee80211_hdr
*hdr
;
484 struct ieee80211_hw
*hw
= priv
->hw
;
485 struct sk_buff
*skb
= rxbuf
->skb
;
486 struct ath_common
*common
= ath9k_hw_common(priv
->ah
);
487 struct ath_htc_rx_status
*rxstatus
;
488 int hdrlen
, padpos
, padsize
;
489 int last_rssi
= ATH_RSSI_DUMMY_MARKER
;
492 if (skb
->len
<= HTC_RX_FRAME_HEADER_SIZE
) {
493 ath_err(common
, "Corrupted RX frame, dropping\n");
497 rxstatus
= (struct ath_htc_rx_status
*)skb
->data
;
499 if (be16_to_cpu(rxstatus
->rs_datalen
) -
500 (skb
->len
- HTC_RX_FRAME_HEADER_SIZE
) != 0) {
502 "Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
503 rxstatus
->rs_datalen
, skb
->len
);
507 /* Get the RX status information */
508 memcpy(&rxbuf
->rxstatus
, rxstatus
, HTC_RX_FRAME_HEADER_SIZE
);
509 skb_pull(skb
, HTC_RX_FRAME_HEADER_SIZE
);
511 hdr
= (struct ieee80211_hdr
*)skb
->data
;
512 fc
= hdr
->frame_control
;
513 hdrlen
= ieee80211_get_hdrlen_from_skb(skb
);
515 padpos
= ath9k_cmn_padpos(fc
);
517 padsize
= padpos
& 3;
518 if (padsize
&& skb
->len
>= padpos
+padsize
+FCS_LEN
) {
519 memmove(skb
->data
+ padsize
, skb
->data
, padpos
);
520 skb_pull(skb
, padsize
);
523 memset(rx_status
, 0, sizeof(struct ieee80211_rx_status
));
525 if (rxbuf
->rxstatus
.rs_status
!= 0) {
526 if (rxbuf
->rxstatus
.rs_status
& ATH9K_RXERR_CRC
)
527 rx_status
->flag
|= RX_FLAG_FAILED_FCS_CRC
;
528 if (rxbuf
->rxstatus
.rs_status
& ATH9K_RXERR_PHY
)
531 if (rxbuf
->rxstatus
.rs_status
& ATH9K_RXERR_DECRYPT
) {
533 } else if (rxbuf
->rxstatus
.rs_status
& ATH9K_RXERR_MIC
) {
534 if (ieee80211_is_ctl(fc
))
536 * Sometimes, we get invalid
537 * MIC failures on valid control frames.
538 * Remove these mic errors.
540 rxbuf
->rxstatus
.rs_status
&= ~ATH9K_RXERR_MIC
;
542 rx_status
->flag
|= RX_FLAG_MMIC_ERROR
;
546 * Reject error frames with the exception of
547 * decryption and MIC failures. For monitor mode,
548 * we also ignore the CRC error.
550 if (priv
->ah
->opmode
== NL80211_IFTYPE_MONITOR
) {
551 if (rxbuf
->rxstatus
.rs_status
&
552 ~(ATH9K_RXERR_DECRYPT
| ATH9K_RXERR_MIC
|
556 if (rxbuf
->rxstatus
.rs_status
&
557 ~(ATH9K_RXERR_DECRYPT
| ATH9K_RXERR_MIC
)) {
563 if (!(rxbuf
->rxstatus
.rs_status
& ATH9K_RXERR_DECRYPT
)) {
565 keyix
= rxbuf
->rxstatus
.rs_keyix
;
566 if (keyix
!= ATH9K_RXKEYIX_INVALID
) {
567 rx_status
->flag
|= RX_FLAG_DECRYPTED
;
568 } else if (ieee80211_has_protected(fc
) &&
569 skb
->len
>= hdrlen
+ 4) {
570 keyix
= skb
->data
[hdrlen
+ 3] >> 6;
571 if (test_bit(keyix
, common
->keymap
))
572 rx_status
->flag
|= RX_FLAG_DECRYPTED
;
576 ath9k_process_rate(hw
, rx_status
, rxbuf
->rxstatus
.rs_rate
,
577 rxbuf
->rxstatus
.rs_flags
);
579 if (priv
->op_flags
& OP_ASSOCIATED
) {
580 if (rxbuf
->rxstatus
.rs_rssi
!= ATH9K_RSSI_BAD
&&
581 !rxbuf
->rxstatus
.rs_moreaggr
)
582 ATH_RSSI_LPF(priv
->rx
.last_rssi
,
583 rxbuf
->rxstatus
.rs_rssi
);
585 last_rssi
= priv
->rx
.last_rssi
;
587 if (likely(last_rssi
!= ATH_RSSI_DUMMY_MARKER
))
588 rxbuf
->rxstatus
.rs_rssi
= ATH_EP_RND(last_rssi
,
589 ATH_RSSI_EP_MULTIPLIER
);
591 if (rxbuf
->rxstatus
.rs_rssi
< 0)
592 rxbuf
->rxstatus
.rs_rssi
= 0;
594 if (ieee80211_is_beacon(fc
))
595 priv
->ah
->stats
.avgbrssi
= rxbuf
->rxstatus
.rs_rssi
;
598 rx_status
->mactime
= be64_to_cpu(rxbuf
->rxstatus
.rs_tstamp
);
599 rx_status
->band
= hw
->conf
.channel
->band
;
600 rx_status
->freq
= hw
->conf
.channel
->center_freq
;
601 rx_status
->signal
= rxbuf
->rxstatus
.rs_rssi
+ ATH_DEFAULT_NOISE_FLOOR
;
602 rx_status
->antenna
= rxbuf
->rxstatus
.rs_antenna
;
603 rx_status
->flag
|= RX_FLAG_TSFT
;
612 * FIXME: Handle FLUSH later on.
614 void ath9k_rx_tasklet(unsigned long data
)
616 struct ath9k_htc_priv
*priv
= (struct ath9k_htc_priv
*)data
;
617 struct ath9k_htc_rxbuf
*rxbuf
= NULL
, *tmp_buf
= NULL
;
618 struct ieee80211_rx_status rx_status
;
621 struct ieee80211_hdr
*hdr
;
624 spin_lock_irqsave(&priv
->rx
.rxbuflock
, flags
);
625 list_for_each_entry(tmp_buf
, &priv
->rx
.rxbuf
, list
) {
626 if (tmp_buf
->in_process
) {
633 spin_unlock_irqrestore(&priv
->rx
.rxbuflock
, flags
);
640 if (!ath9k_rx_prepare(priv
, rxbuf
, &rx_status
)) {
641 dev_kfree_skb_any(rxbuf
->skb
);
645 memcpy(IEEE80211_SKB_RXCB(rxbuf
->skb
), &rx_status
,
646 sizeof(struct ieee80211_rx_status
));
648 hdr
= (struct ieee80211_hdr
*) skb
->data
;
650 if (ieee80211_is_beacon(hdr
->frame_control
) && priv
->ps_enabled
)
651 ieee80211_queue_work(priv
->hw
, &priv
->ps_work
);
653 spin_unlock_irqrestore(&priv
->rx
.rxbuflock
, flags
);
655 ieee80211_rx(priv
->hw
, skb
);
657 spin_lock_irqsave(&priv
->rx
.rxbuflock
, flags
);
659 rxbuf
->in_process
= false;
661 list_move_tail(&rxbuf
->list
, &priv
->rx
.rxbuf
);
663 spin_unlock_irqrestore(&priv
->rx
.rxbuflock
, flags
);
668 void ath9k_htc_rxep(void *drv_priv
, struct sk_buff
*skb
,
669 enum htc_endpoint_id ep_id
)
671 struct ath9k_htc_priv
*priv
= (struct ath9k_htc_priv
*)drv_priv
;
672 struct ath_hw
*ah
= priv
->ah
;
673 struct ath_common
*common
= ath9k_hw_common(ah
);
674 struct ath9k_htc_rxbuf
*rxbuf
= NULL
, *tmp_buf
= NULL
;
676 spin_lock(&priv
->rx
.rxbuflock
);
677 list_for_each_entry(tmp_buf
, &priv
->rx
.rxbuf
, list
) {
678 if (!tmp_buf
->in_process
) {
683 spin_unlock(&priv
->rx
.rxbuflock
);
686 ath_dbg(common
, ATH_DBG_ANY
,
687 "No free RX buffer\n");
691 spin_lock(&priv
->rx
.rxbuflock
);
693 rxbuf
->in_process
= true;
694 spin_unlock(&priv
->rx
.rxbuflock
);
696 tasklet_schedule(&priv
->rx_tasklet
);
699 dev_kfree_skb_any(skb
);
702 /* FIXME: Locking for cleanup/init */
704 void ath9k_rx_cleanup(struct ath9k_htc_priv
*priv
)
706 struct ath9k_htc_rxbuf
*rxbuf
, *tbuf
;
708 list_for_each_entry_safe(rxbuf
, tbuf
, &priv
->rx
.rxbuf
, list
) {
709 list_del(&rxbuf
->list
);
711 dev_kfree_skb_any(rxbuf
->skb
);
716 int ath9k_rx_init(struct ath9k_htc_priv
*priv
)
718 struct ath_hw
*ah
= priv
->ah
;
719 struct ath_common
*common
= ath9k_hw_common(ah
);
720 struct ath9k_htc_rxbuf
*rxbuf
;
723 INIT_LIST_HEAD(&priv
->rx
.rxbuf
);
724 spin_lock_init(&priv
->rx
.rxbuflock
);
726 for (i
= 0; i
< ATH9K_HTC_RXBUF
; i
++) {
727 rxbuf
= kzalloc(sizeof(struct ath9k_htc_rxbuf
), GFP_KERNEL
);
729 ath_err(common
, "Unable to allocate RX buffers\n");
732 list_add_tail(&rxbuf
->list
, &priv
->rx
.rxbuf
);
738 ath9k_rx_cleanup(priv
);