/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size = htt->max_throughput_mbps *
	       1000 /
	       (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}
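/*
 * Worked example (illustrative, not part of the driver logic): for
 * max_throughput_mbps == 800 the formula above gives
 *
 *   size = 800 * 1000 / (8 * 1000) * 20 = 2000
 *
 * i.e. enough buffers for roughly 20 ms of 1000-byte frames at 800 Mbps,
 * which roundup_pow_of_two() then raises to 2048 == HTT_RX_RING_SIZE_MAX.
 */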
static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size = htt->max_throughput_mbps *
	       1000 /
	       (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}
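/*
 * Worked example (illustrative): for max_throughput_mbps == 800 the fill
 * level is 800 * 1000 / (8 * 1000) * 10 = 1000 buffers, i.e. roughly 10 ms
 * worth of traffic. The "size - 1" cap keeps one ring slot permanently
 * empty so that a completely full ring and a completely empty ring do not
 * look identical when the read and write indices are compared.
 */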
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}
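/*
 * Note (illustrative): because the ring size is a power of two,
 * "idx &= htt->rx_ring.size_mask" above is a branch-free modulo. For
 * example, with a 2048-entry ring the mask is 0x7ff, so an idx of 2048
 * wraps back to 0 without a division or comparison.
 */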
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is that RX may take up a significant amount of CPU cycles
	 * and starve other tasks, e.g. TX on an ethernet device while acting
	 * as a bridge with an ath10k wlan interface. This ended up with very
	 * poor performance once the host system's CPU was overwhelmed with
	 * RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets
	 * are processed in FIFO order. This means actual RX processing can
	 * starve out refilling. If there aren't enough buffers on the RX
	 * ring the FW will not report RX until it is refilled with enough
	 * buffers. This automatically balances load with respect to CPU
	 * power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
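/*
 * Worked example (illustrative values): with fill_level == 1000 and
 * fill_cnt == 900 the deficit is 100; only min(ATH10K_HTT_MAX_NUM_REFILL,
 * 100) buffers are posted now and the tasklet is rescheduled for the
 * remainder, so RX processing queued behind it gets a chance to run
 * between refill batches.
 */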
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < htt->rx_ring.size; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		if (!skb)
			continue;

		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		htt->rx_ring.netbufs_ring[i] = NULL;
	}
}
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);

	ath10k_htt_rx_ring_clean_up(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	return msdu;
}
static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu,
				   u32 *attention)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu, *next;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused) {
		ath10k_warn(ar, "htt is confused. refusing rx\n");
		return -1;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 *	  expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err(ar, "htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		*attention |= __le32_to_cpu(rx_desc->attention.flags) &
			     (RX_ATTENTION_FLAGS_TKIP_MIC_ERR |
			      RX_ATTENTION_FLAGS_DECRYPT_ERR |
			      RX_ATTENTION_FLAGS_FCS_ERR |
			      RX_ATTENTION_FLAGS_MGMT_TYPE);

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that they will still be delivered
			 * to the upper stack, if there is no CRC error for
			 * this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of at the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"htt rx chained: ", next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		}

		next = ath10k_htt_rx_netbuf_pop(htt);
		msdu->next = next;
		msdu = next;
	}
	*tail_msdu = msdu;

	if (*head_msdu == NULL)
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
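/*
 * Usage sketch (illustrative, mirrors the callers below): callers hold
 * rx_ring.lock and interpret the return value as follows -
 *
 *   ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
 *                                 &msdu_head, &msdu_tail, &attention);
 *   if (ret < 0)        - fatal error, free the chain and bail out
 *   else if (ret == 0)  - every MSDU fit into a single rx buffer
 *   else                - ret == 1: at least one MSDU spans buffers and
 *                         must be linearized (see ath10k_unchain_msdu())
 */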
static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->tx_compl_q);
	skb_queue_head_init(&htt->rx_compl_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}
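/*
 * Sizing note (illustrative): for a 2048-entry ring this allocates an
 * array of 2048 sk_buff pointers for netbufs_ring, a coherent DMA buffer
 * holding one physical-address word per ring entry for paddrs_ring (the
 * part the target DMA-reads), and a single shared alloc index word that
 * tells the target how far the host has filled the ring.
 */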
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn(ar, "unknown encryption type %d\n", type);
	return 0;
}
static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	}

	ath10k_warn(ar, "unknown encryption type %d\n", type);
	return 0;
}
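/*
 * Example (illustrative): for an AES_CCM_WPA2 frame the 8-byte CCMP
 * header counted by ath10k_htt_rx_crypto_param_len() sits between the
 * 802.11 header and the payload, while the 8-byte MIC counted here
 * trails the payload; both are stripped before the frame is handed to
 * mac80211 with RX_FLAG_IV_STRIPPED set.
 */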
/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;

	return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}
/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	u8 *qc;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;
static const u8 rx_legacy_rate_idx[] = {
	3,	/* 0x00  - 11Mbps  */
	2,	/* 0x01  - 5.5Mbps */
	1,	/* 0x02  - 2Mbps   */
	0,	/* 0x03  - 1Mbps   */
	3,	/* 0x04  - 11Mbps  */
	2,	/* 0x05  - 5.5Mbps */
	1,	/* 0x06  - 2Mbps   */
	0,	/* 0x07  - 1Mbps   */
	10,	/* 0x08  - 48Mbps  */
	8,	/* 0x09  - 24Mbps  */
	6,	/* 0x0A  - 12Mbps  */
	4,	/* 0x0B  - 6Mbps   */
	11,	/* 0x0C  - 54Mbps  */
	9,	/* 0x0D  - 36Mbps  */
	7,	/* 0x0E  - 18Mbps  */
	5,	/* 0x0F  - 9Mbps   */
};
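/*
 * Example (illustrative): a legacy rate code of 0x0B maps to rate_idx 4
 * (6 Mbps OFDM) via the table above; on 5 GHz the caller then subtracts 4
 * because the shared ath10k_rates[] table starts with four CCK entries
 * that do not exist on that band.
 */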
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  enum ieee80211_band band,
				  u8 info0, u32 info1, u32 info2,
				  struct ieee80211_rx_status *status)
{
	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
	u8 preamble = 0;

	/* Check if valid fields */
	if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
		return;

	preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
		rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
		rate_idx = 0;

		if (rate < 0x08 || rate > 0x0F)
			break;

		switch (band) {
		case IEEE80211_BAND_2GHZ:
			if (cck)
				rate &= ~BIT(3);
			rate_idx = rx_legacy_rate_idx[rate];
			break;
		case IEEE80211_BAND_5GHZ:
			rate_idx = rx_legacy_rate_idx[rate];
			/* We are using same rate table registering
			   HW - ath10k_rates[]. In case of 5GHz skip
			   CCK rates, so -4 here */
			rate_idx -= 4;
			break;
		default:
			break;
		}

		status->rate_idx = rate_idx;
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info1 and info2 */
		mcs = info1 & 0x1F;
		nss = mcs >> 3;
		bw = (info1 >> 7) & 1;
		sgi = (info2 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
		   TODO: check fields */
		mcs = (info2 >> 4) & 0x0F;
		nss = ((info1 >> 10) & 0x07) + 1;
		bw = info1 & 3;
		sgi = info2 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
			break;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}
static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
				      struct ieee80211_rx_status *rx_status,
				      struct sk_buff *skb,
				      enum htt_rx_mpdu_encrypt_type enctype,
				      enum rx_msdu_decap_format fmt,
				      bool dot11frag)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	rx_status->flag &= ~(RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
		return;

	/*
	 * There's no explicit rx descriptor flag to indicate whether a given
	 * frame has been decrypted or not. We're forced to use the decap
	 * format as an implicit indication. However fragmentation rx is always
	 * raw and it probably never reports undecrypted raws.
	 *
	 * This makes sure sniffed frames are reported as-is without stripping
	 * the protected flag.
	 */
	if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
		return;

	rx_status->flag |= RX_FLAG_DECRYPTED |
			   RX_FLAG_IV_STRIPPED |
			   RX_FLAG_MMIC_STRIPPED;
	hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
					   ~IEEE80211_FCTL_PROTECTED);
}
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}
static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}
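/*
 * Example (illustrative): a 3-addr QoS data header is 26 bytes, which
 * rounds up to 28 here; a 4-addr QoS data header is 32 bytes and is
 * already aligned.
 */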
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
				struct ieee80211_rx_status *rx_status,
				struct sk_buff *skb_in)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_desc *rxd;
	struct sk_buff *skb = skb_in;
	struct sk_buff *first;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct ieee80211_hdr *hdr;
	u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos;
	unsigned int hdr_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(hdr_buf, hdr, hdr_len);
	hdr = (struct ieee80211_hdr *)hdr_buf;

	first = skb;
	while (skb) {
		void *decap_hdr;
		int len;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			 RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

		/* First frame in an A-MSDU chain has more decapped data. */
		if (skb == first) {
			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
			len += round_up(ath10k_htt_rx_crypto_param_len(ar,
						enctype), 4);
			decap_hdr += len;
		}

		switch (fmt) {
		case RX_MSDU_DECAP_RAW:
			/* remove trailing FCS */
			skb_trim(skb, skb->len - FCS_LEN);
			break;
		case RX_MSDU_DECAP_NATIVE_WIFI:
			/* pull decapped header and copy SA & DA */
			hdr = (struct ieee80211_hdr *)skb->data;
			hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
			ether_addr_copy(da, ieee80211_get_DA(hdr));
			ether_addr_copy(sa, ieee80211_get_SA(hdr));
			skb_pull(skb, hdr_len);

			/* push original 802.11 header */
			hdr = (struct ieee80211_hdr *)hdr_buf;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);

			/* original A-MSDU header has the bit set but we're
			 * not including A-MSDU subframe header */
			hdr = (struct ieee80211_hdr *)skb->data;
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			/* original 802.11 header has a different DA and in
			 * case of 4addr it may also have different SA
			 */
			ether_addr_copy(ieee80211_get_DA(hdr), da);
			ether_addr_copy(ieee80211_get_SA(hdr), sa);
			break;
		case RX_MSDU_DECAP_ETHERNET2_DIX:
			/* strip ethernet header and insert decapped 802.11
			 * header, amsdu subframe header and rfc1042 header */

			len = 0;
			len += sizeof(struct rfc1042_hdr);
			len += sizeof(struct amsdu_subframe_hdr);

			skb_pull(skb, sizeof(struct ethhdr));
			memcpy(skb_push(skb, len), decap_hdr, len);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		case RX_MSDU_DECAP_8023_SNAP_LLC:
			/* insert decapped 802.11 header making a singly
			 * A-MSDU */
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		}

		skb_in = skb;
		ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
					  false);
		skb = skb->next;
		skb_in->next = NULL;

		if (skb)
			rx_status->flag |= RX_FLAG_AMSDU_MORE;
		else
			rx_status->flag &= ~RX_FLAG_AMSDU_MORE;

		ath10k_process_rx(htt->ar, rx_status, skb_in);
	}

	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
	 * monitor interface active for sniffing purposes. */
}
static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
			       struct ieee80211_rx_status *rx_status,
			       struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	int hdr_len;
	void *rfc1042;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - FCS_LEN);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* Pull decapped header */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
		skb_pull(skb, hdr_len);

		/* Push original header */
		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* strip ethernet header and insert decapped 802.11 header and
		 * rfc1042 header */

		rfc1042 = hdr;
		rfc1042 += roundup(hdr_len, 4);
		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar,
					enctype), 4);

		skb_pull(skb, sizeof(struct ethhdr));
		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
		       rfc1042, sizeof(struct rfc1042_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* remove A-MSDU subframe header and insert
		 * decapped 802.11 header. rfc1042 header is already there */

		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	}

	ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);

	ath10k_process_rx(htt->ar, rx_status, skb);
}
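/*
 * Summary (illustrative): the four decap formats above cover RAW (full
 * 802.11 frame incl. FCS), NATIVE_WIFI (stripped-down 802.11),
 * ETHERNET2_DIX (fully decapped Ethernet II) and 8023_SNAP_LLC (802.3
 * with SNAP/LLC); each branch rebuilds the 802.11 frame that mac80211
 * expects before the skb is passed up.
 */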
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}
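/*
 * Usage note (illustrative): returning CHECKSUM_UNNECESSARY lets the
 * network stack skip software verification of the TCP/UDP checksum, e.g.
 *
 *   skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
 *
 * as done by the rx paths above; anything unrecognized or failed falls
 * back to CHECKSUM_NONE so the stack re-checks it in software.
 */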
static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
{
	struct sk_buff *next = msdu_head->next;
	struct sk_buff *to_free = next;
	int space;
	int total_len = 0;

	/* TODO: We might optimize this by using
	 * skb_try_coalesce or a similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skbs?
	 */

	msdu_head->next = NULL;

	/* Allocate total length all at once. */
	while (next) {
		total_len += next->len;
		next = next->next;
	}

	space = total_len - skb_tailroom(msdu_head);
	if ((space > 0) &&
	    (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		msdu_head->next = to_free;
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	next = to_free;
	while (next) {
		skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
					  next->len);
		next = next->next;
	}

	/* If here, we have consolidated skb. Free the
	 * fragments and pass the main skb on up the
	 * stack.
	 */
	ath10k_htt_rx_free_msdu_chain(to_free);
	return 0;
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
					struct sk_buff *head,
					enum htt_rx_mpdu_status status,
					bool channel_set,
					u32 attention)
{
	struct ath10k *ar = htt->ar;

	if (head->len == 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx dropping due to zero-len\n");
		return false;
	}

	if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx dropping due to decrypt-err\n");
		return false;
	}

	if (!channel_set) {
		ath10k_warn(ar, "no channel configured; ignoring frame!\n");
		return false;
	}

	/* Skip mgmt frames while we handle this in WMI */
	if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
	    attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
		return false;
	}

	if (status != HTT_RX_IND_MPDU_STATUS_OK &&
	    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
	    status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
	    !htt->ar->monitor_started) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx ignoring frame w/ status %d\n",
			   status);
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx CAC running\n");
		return false;
	}

	return true;
}
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct htt_rx_desc *rxd;
	enum htt_rx_mpdu_status status;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	u32 attention;
	int fw_desc_len;
	u8 *fw_desc;
	bool channel_set;
	int i, j;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	/* Fill this once, while this is per-ppdu */
	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
		memset(rx_status, 0, sizeof(*rx_status));
		rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
				    rx->ppdu.combined_rssi;
	}

	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
		/* TSF available only in 32-bit */
		rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
		rx_status->flag |= RX_FLAG_MACTIME_END;
	}

	channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);

	if (channel_set) {
		ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
				      rx->ppdu.info0,
				      __le32_to_cpu(rx->ppdu.info1),
				      __le32_to_cpu(rx->ppdu.info2),
				      rx_status);
	}

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;

			attention = 0;
			msdu_head = NULL;
			msdu_tail = NULL;
			ret = ath10k_htt_rx_amsdu_pop(htt,
						      &fw_desc,
						      &fw_desc_len,
						      &msdu_head,
						      &msdu_tail,
						      &attention);

			if (ret < 0) {
				ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n",
					    ret);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			rxd = container_of((void *)msdu_head->data,
					   struct htt_rx_desc,
					   msdu_payload);

			if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
							 status,
							 channel_set,
							 attention)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ret > 0 &&
			    ath10k_unchain_msdu(msdu_head) < 0) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
				rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
			else
				rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;

			if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_status->flag &= ~RX_FLAG_MMIC_ERROR;

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
			else
				ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *msdu_head, *msdu_tail;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct ieee80211_hdr *hdr;
	int ret;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;
	u32 attention = 0;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &msdu_head, &msdu_tail,
				      &attention);
	spin_unlock_bh(&htt->rx_ring.lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
			    ret);
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */
	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn(ar, "we don't support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
				  true);
	msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);

	if (tkip_mic_err)
		ath10k_warn(ar, "tkip mic error\n");

	if (decrypt_err) {
		ath10k_warn(ar, "decryption err in fragmented rx\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype);

		/* It is more efficient to move the header than the payload */
		memmove((void *)msdu_head->data + paramlen,
			(void *)msdu_head->data,
			hdrlen);
		skb_pull(msdu_head, paramlen);
		hdr = (struct ieee80211_hdr *)msdu_head->data;
	}

	/* remove trailing FCS */
	trim = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(ar, enctype);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > msdu_head->len) {
		ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	skb_trim(msdu_head, msdu_head->len - trim);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
			msdu_head->data, msdu_head->len);
	ath10k_process_rx(htt->ar, rx_status, msdu_head);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}
static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	lockdep_assert_held(&htt->tx_lock);

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		spin_lock_bh(&htt->tx_lock);
		ath10k_txrx_tx_unref(htt, &tx_done);
		spin_unlock_bh(&htt->tx_lock);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		spin_lock_bh(&htt->tx_lock);
		__skb_queue_tail(&htt->tx_compl_q, skb);
		spin_unlock_bh(&htt->tx_lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	default:
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct htt_resp *resp;
	struct sk_buff *skb;

	spin_lock_bh(&htt->tx_lock);
	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->tx_lock);

	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}