/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
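/* Note: keeping the fill level just below half the ring size matches the
 * replenishment constraint asserted via the BUILD_BUG_ON in
 * __ath10k_htt_rx_ring_fill_n() below.
 */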

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

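	/* Note: outstanding rx buffers are kept in a hash table keyed by DMA
	 * address so that in-order indications, which reference buffers by
	 * physical address, can be mapped back to their skbs.
	 */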
	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx
	 * Ring. To keep things simple make sure the ring is never more than
	 * half full. This guarantees there'll be no replenishment overruns
	 * possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 (u32)paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up a significant amount of CPU cycles and
	 * starve other tasks, e.g. TX on an ethernet device while acting as
	 * a bridge with an ath10k wlan interface. This ended up with very
	 * poor performance once the host system's CPU was overwhelmed with
	 * RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring FW
	 * will not report RX until it is refilled with enough buffers. This
	 * automatically balances load with regard to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->rx_compl_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ring.paddrs_ring[idx] = 0;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u32 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
					struct htt_rx_in_ord_ind *ev,
					struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_compl_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

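/* Per IEEE 802.11ac (VHT-SIG-A1), group IDs 0 and 63 denote a single-user
 * PPDU; all other group IDs indicate multi-user transmissions.
 */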
#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
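		/* Note: HT encodes 8 MCS values per spatial stream (for the
		 * equal-modulation MCS 0-31 range), hence Nss = MCS / 8.
		 */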
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO: check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

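	/* Note: channel sources are tried from most to least specific: scan
	 * channel, current rx channel, the peer's vif channel, the vdev's
	 * channel, any active channel context, and finally the target's
	 * operating channel.
	 */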
	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->vht_nss = 0;
		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
		status->flag &= ~(RX_FLAG_HT |
				  RX_FLAG_VHT |
				  RX_FLAG_SHORT_GI |
				  RX_FLAG_40MHZ |
				  RX_FLAG_MACTIME_END);
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu)
		ath10k_htt_rx_h_mactime(ar, status, rxd);
}

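/* TID to access category mapping as defined by IEEE 802.11 / WMM (802.1D
 * priorities 0-7 onto BE/BK/VI/VO).
 */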
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
							"legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

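	/* Note: the decapped nwifi header is assumed to be padded out to a
	 * 4-byte boundary unless the firmware advertises the
	 * NO_NWIFI_DECAP_4ADDR_PADDING feature.
	 */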
	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_tail_len(ar, enctype));

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - 8);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
	    ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
		/* The QCA99X0 4-address mode pads 2 bytes at the
		 * beginning of the MSDU
		 */
		hdr = (struct ieee80211_hdr *)(msdu->data + 2);
		/* The skb length needs to be extended by 2 since the 2 bytes
		 * at the tail are excluded due to the padding
		 */
		skb_put(msdu, 2);
	} else {
		hdr = (struct ieee80211_hdr *)(msdu->data);
	}

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, 4) +
			   round_up(crypto_len, 4);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
		break;
	}
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

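/* Note: CHECKSUM_UNNECESSARY tells the network stack that hardware already
 * verified the IP and TCP/UDP checksums, so software checksumming can be
 * skipped for this skb.
 */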
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	size_t hdr_len;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(first_hdr, hdr, hdr_len);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;
	qos = ieee80211_get_qos_ctl(hdr);
	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		ath10k_process_rx(ar, status, msdu);
	}
}

static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or a similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skbs?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    bool chained)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (!chained)
		return;

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu);
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}

static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	static struct ieee80211_rx_status rx_status;
	struct sk_buff_head amsdu;
	int ret;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

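	/* Run the A-MSDU through the rx pipeline: gather per-PPDU status,
	 * undo msdu chaining, filter out disallowed frames, apply per-MPDU
	 * handling (decap, crypto flags, csum), then deliver to mac80211.
	 */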
	ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
	ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
	ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);

	return 0;
}

static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
				      struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	tasklet_schedule(&htt->txrx_compl_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
{
	atomic_inc(&htt->num_mpdus_ready);

	tasklet_schedule(&htt->txrx_compl_task);
}

static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/MSI-X case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 * Note that with only one concurrent reader and one concurrent
		 * writer, you don't need extra locking to use these macros.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

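	/* Note: if the list ran out before a LAST_MSDU flag was seen the
	 * A-MSDU is incomplete; give the frames back to the caller's list.
	 */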
	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't a multiple of 2 nor 4 so
		 * the actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
1807 offset = 4 - ((unsigned long)msdu->data & 3);
1808 skb_put(msdu, offset);
1809 memmove(msdu->data + offset, msdu->data, msdu->len);
1810 skb_pull(msdu, offset);
1811
1812 /* FIXME: The frame is NWifi. Re-construct QoS Control
1813 * if possible later.
1814 */
1815
1816 memset(status, 0, sizeof(*status));
1817 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1818
1819 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
1820 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
1821 ath10k_process_rx(ar, status, msdu);
1822 }
1823 }
1824
1825 static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1826 {
1827 struct ath10k_htt *htt = &ar->htt;
1828 struct htt_resp *resp = (void *)skb->data;
1829 struct ieee80211_rx_status *status = &htt->rx_status;
1830 struct sk_buff_head list;
1831 struct sk_buff_head amsdu;
1832 u16 peer_id;
1833 u16 msdu_count;
1834 u8 vdev_id;
1835 u8 tid;
1836 bool offload;
1837 bool frag;
1838 int ret;
1839
1840 lockdep_assert_held(&htt->rx_ring.lock);
1841
1842 if (htt->rx_confused)
1843 return;
1844
1845 skb_pull(skb, sizeof(resp->hdr));
1846 skb_pull(skb, sizeof(resp->rx_in_ord_ind));
1847
1848 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
1849 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
1850 vdev_id = resp->rx_in_ord_ind.vdev_id;
1851 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
1852 offload = !!(resp->rx_in_ord_ind.info &
1853 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
1854 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
1855
1856 ath10k_dbg(ar, ATH10K_DBG_HTT,
1857 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
1858 vdev_id, peer_id, tid, offload, frag, msdu_count);
1859
1860 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
1861 ath10k_warn(ar, "dropping invalid in order rx indication\n");
1862 return;
1863 }
1864
1865 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
1866 * extracted and processed.
1867 */
1868 __skb_queue_head_init(&list);
1869 ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
1870 if (ret < 0) {
1871 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
1872 htt->rx_confused = true;
1873 return;
1874 }
1875
1876 /* Offloaded frames are very different and need to be handled
1877 * separately.
1878 */
1879 if (offload)
1880 ath10k_htt_rx_h_rx_offload(ar, &list);
1881
1882 while (!skb_queue_empty(&list)) {
1883 __skb_queue_head_init(&amsdu);
1884 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
1885 switch (ret) {
1886 case 0:
1887 /* Note: The in-order indication may report interleaved
1888 * frames from different PPDUs meaning reported rx rate
1889 * to mac80211 isn't accurate/reliable. It's still
1890 * better to report something than nothing though. This
1891 * should still give an idea about rx rate to the user.
1892 */
1893 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
1894 ath10k_htt_rx_h_filter(ar, &amsdu, status);
1895 ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
1896 ath10k_htt_rx_h_deliver(ar, &amsdu, status);
1897 break;
1898 case -EAGAIN:
1899 /* fall through */
1900 default:
1901 /* Should not happen. */
1902 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
1903 htt->rx_confused = true;
1904 __skb_queue_purge(&list);
1905 return;
1906 }
1907 }
1908 }
1909
1910 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
1911 const __le32 *resp_ids,
1912 int num_resp_ids)
1913 {
1914 int i;
1915 u32 resp_id;
1916
1917 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
1918 num_resp_ids);
1919
1920 for (i = 0; i < num_resp_ids; i++) {
1921 resp_id = le32_to_cpu(resp_ids[i]);
1922
1923 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
1924 resp_id);
1925
1926 /* TODO: free resp_id */
1927 }
1928 }
1929
1930 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
1931 {
1932 struct ieee80211_hw *hw = ar->hw;
1933 struct ieee80211_txq *txq;
1934 struct htt_resp *resp = (struct htt_resp *)skb->data;
1935 struct htt_tx_fetch_record *record;
1936 size_t len;
1937 size_t max_num_bytes;
1938 size_t max_num_msdus;
1939 size_t num_bytes;
1940 size_t num_msdus;
1941 const __le32 *resp_ids;
1942 u16 num_records;
1943 u16 num_resp_ids;
1944 u16 peer_id;
1945 u8 tid;
1946 int ret;
1947 int i;
1948
1949 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
1950
1951 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
1952 if (unlikely(skb->len < len)) {
1953 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
1954 return;
1955 }
1956
1957 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
1958 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
1959
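/* The fixed-size header has been validated above; extend the
 * expected length by the variable-size record and resp_id arrays
 * before dereferencing them.
 */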
1960 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
1961 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
1962
1963 if (unlikely(skb->len < len)) {
1964 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
1965 return;
1966 }
1967
1968 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
1969 num_records, num_resp_ids,
1970 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
1971
1972 if (!ar->htt.tx_q_state.enabled) {
1973 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
1974 return;
1975 }
1976
1977 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
1978 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
1979 return;
1980 }
1981
1982 rcu_read_lock();
1983
1984 for (i = 0; i < num_records; i++) {
1985 record = &resp->tx_fetch_ind.records[i];
1986 peer_id = MS(le16_to_cpu(record->info),
1987 HTT_TX_FETCH_RECORD_INFO_PEER_ID);
1988 tid = MS(le16_to_cpu(record->info),
1989 HTT_TX_FETCH_RECORD_INFO_TID);
1990 max_num_msdus = le16_to_cpu(record->num_msdus);
1991 max_num_bytes = le32_to_cpu(record->num_bytes);
1992
1993 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
1994 i, peer_id, tid, max_num_msdus, max_num_bytes);
1995
1996 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
1997 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
1998 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
1999 peer_id, tid);
2000 continue;
2001 }
2002
2003 spin_lock_bh(&ar->data_lock);
2004 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2005 spin_unlock_bh(&ar->data_lock);
2006
2007 /* It is okay to release the lock and use txq because RCU read
2008 * lock is held.
2009 */
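/* (The txq and its underlying station/vif are freed only after an
 * RCU grace period per mac80211's object lifetime rules, so the
 * pointer stays valid inside this rcu_read_lock() section.)
 */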
2010
2011 if (unlikely(!txq)) {
2012 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2013 peer_id, tid);
2014 continue;
2015 }
2016
2017 num_msdus = 0;
2018 num_bytes = 0;
2019
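/* Push MSDUs one at a time until either quota from the fetch
 * record is met; ath10k_mac_tx_push_txq() returns the pushed
 * frame's byte count, or a negative error once the queue can't
 * deliver any more.
 */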
2020 while (num_msdus < max_num_msdus &&
2021 num_bytes < max_num_bytes) {
2022 ret = ath10k_mac_tx_push_txq(hw, txq);
2023 if (ret < 0)
2024 break;
2025
2026 num_msdus++;
2027 num_bytes += ret;
2028 }
2029
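/* Echo the actual delivery back into the record; the records are
 * returned to the firmware in the fetch response below.
 */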
2030 record->num_msdus = cpu_to_le16(num_msdus);
2031 record->num_bytes = cpu_to_le32(num_bytes);
2032
2033 ath10k_htt_tx_txq_recalc(hw, txq);
2034 }
2035
2036 rcu_read_unlock();
2037
2038 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
2039 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
2040
2041 ret = ath10k_htt_tx_fetch_resp(ar,
2042 resp->tx_fetch_ind.token,
2043 resp->tx_fetch_ind.fetch_seq_num,
2044 resp->tx_fetch_ind.records,
2045 num_records);
2046 if (unlikely(ret)) {
2047 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
2048 le32_to_cpu(resp->tx_fetch_ind.token), ret);
2049 /* FIXME: request fw restart */
2050 }
2051
2052 ath10k_htt_tx_txq_sync(ar);
2053 }
2054
2055 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
2056 struct sk_buff *skb)
2057 {
2058 const struct htt_resp *resp = (void *)skb->data;
2059 size_t len;
2060 int num_resp_ids;
2061
2062 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
2063
2064 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
2065 if (unlikely(skb->len < len)) {
2066 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
2067 return;
2068 }
2069
2070 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
2071 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
2072
2073 if (unlikely(skb->len < len)) {
2074 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
2075 return;
2076 }
2077
2078 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
2079 resp->tx_fetch_confirm.resp_ids,
2080 num_resp_ids);
2081 }
2082
2083 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2084 struct sk_buff *skb)
2085 {
2086 const struct htt_resp *resp = (void *)skb->data;
2087 const struct htt_tx_mode_switch_record *record;
2088 struct ieee80211_txq *txq;
2089 struct ath10k_txq *artxq;
2090 size_t len;
2091 size_t num_records;
2092 enum htt_tx_mode_switch_mode mode;
2093 bool enable;
2094 u16 info0;
2095 u16 info1;
2096 u16 threshold;
2097 u16 peer_id;
2098 u8 tid;
2099 int i;
2100
2101 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
2102
2103 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
2104 if (unlikely(skb->len < len)) {
2105 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
2106 return;
2107 }
2108
2109 info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
2110 info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
2111
2112 enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
2113 num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
2114 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
2115 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
2116
2117 ath10k_dbg(ar, ATH10K_DBG_HTT,
2118 "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
2119 info0, info1, enable, num_records, mode, threshold);
2120
2121 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
2122
2123 if (unlikely(skb->len < len)) {
2124 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
2125 return;
2126 }
2127
2128 switch (mode) {
2129 case HTT_TX_MODE_SWITCH_PUSH:
2130 case HTT_TX_MODE_SWITCH_PUSH_PULL:
2131 break;
2132 default:
2133 ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
2134 mode);
2135 return;
2136 }
2137
2138 if (!enable)
2139 return;
2140
2141 ar->htt.tx_q_state.enabled = enable;
2142 ar->htt.tx_q_state.mode = mode;
2143 ar->htt.tx_q_state.num_push_allowed = threshold;
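/* The global threshold caps how much the host may push without a
 * fetch indication in push/pull mode; per-txq budgets are applied
 * from the records below.
 */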
2144
2145 rcu_read_lock();
2146
2147 for (i = 0; i < num_records; i++) {
2148 record = &resp->tx_mode_switch_ind.records[i];
2149 info0 = le16_to_cpu(record->info0);
2150 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
2151 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
2152
2153 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2154 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2155 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2156 peer_id, tid);
2157 continue;
2158 }
2159
2160 spin_lock_bh(&ar->data_lock);
2161 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2162 spin_unlock_bh(&ar->data_lock);
2163
2164 /* It is okay to release the lock and use txq because RCU read
2165 * lock is held.
2166 */
2167
2168 if (unlikely(!txq)) {
2169 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2170 peer_id, tid);
2171 continue;
2172 }
2173
2174 spin_lock_bh(&ar->htt.tx_lock);
2175 artxq = (void *)txq->drv_priv;
2176 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
2177 spin_unlock_bh(&ar->htt.tx_lock);
2178 }
2179
2180 rcu_read_unlock();
2181
2182 ath10k_mac_tx_push_pending(ar);
2183 }
2184
2185 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2186 {
2187 bool release;
2188
2189 release = ath10k_htt_t2h_msg_handler(ar, skb);
2190
2191 /* Free the indication buffer */
2192 if (release)
2193 dev_kfree_skb_any(skb);
2194 }
2195
2196 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2197 {
2198 struct ath10k_htt *htt = &ar->htt;
2199 struct htt_resp *resp = (struct htt_resp *)skb->data;
2200 enum htt_t2h_msg_type type;
2201
2202 /* confirm alignment */
2203 if (!IS_ALIGNED((unsigned long)skb->data, 4))
2204 ath10k_warn(ar, "unaligned htt message, expect trouble\n");
2205
2206 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
2207 resp->hdr.msg_type);
2208
2209 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
2210 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
2211 resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
2212 return true;
2213 }
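/* Translate the firmware-specific wire msg_type into the unified
 * enum via the per-firmware-version mapping table.
 */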
2214 type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
2215
2216 switch (type) {
2217 case HTT_T2H_MSG_TYPE_VERSION_CONF: {
2218 htt->target_version_major = resp->ver_resp.major;
2219 htt->target_version_minor = resp->ver_resp.minor;
2220 complete(&htt->target_version_received);
2221 break;
2222 }
2223 case HTT_T2H_MSG_TYPE_RX_IND:
2224 ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
2225 break;
2226 case HTT_T2H_MSG_TYPE_PEER_MAP: {
2227 struct htt_peer_map_event ev = {
2228 .vdev_id = resp->peer_map.vdev_id,
2229 .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
2230 };
2231 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
2232 ath10k_peer_map_event(htt, &ev);
2233 break;
2234 }
2235 case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
2236 struct htt_peer_unmap_event ev = {
2237 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
2238 };
2239 ath10k_peer_unmap_event(htt, &ev);
2240 break;
2241 }
2242 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
2243 struct htt_tx_done tx_done = {};
2244 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
2245
2246 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
2247
2248 switch (status) {
2249 case HTT_MGMT_TX_STATUS_OK:
2250 tx_done.status = HTT_TX_COMPL_STATE_ACK;
2251 break;
2252 case HTT_MGMT_TX_STATUS_RETRY:
2253 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2254 break;
2255 case HTT_MGMT_TX_STATUS_DROP:
2256 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2257 break;
2258 }
2259
2260 status = ath10k_txrx_tx_unref(htt, &tx_done);
2261 if (!status) {
2262 spin_lock_bh(&htt->tx_lock);
2263 ath10k_htt_tx_mgmt_dec_pending(htt);
2264 spin_unlock_bh(&htt->tx_lock);
2265 }
2266 break;
2267 }
2268 case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
2269 ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
2270 tasklet_schedule(&htt->txrx_compl_task);
2271 break;
2272 case HTT_T2H_MSG_TYPE_SEC_IND: {
2273 struct ath10k *ar = htt->ar;
2274 struct htt_security_indication *ev = &resp->security_indication;
2275
2276 ath10k_dbg(ar, ATH10K_DBG_HTT,
2277 "sec ind peer_id %d unicast %d type %d\n",
2278 __le16_to_cpu(ev->peer_id),
2279 !!(ev->flags & HTT_SECURITY_IS_UNICAST),
2280 MS(ev->flags, HTT_SECURITY_TYPE));
2281 complete(&ar->install_key_done);
2282 break;
2283 }
2284 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
2285 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2286 skb->data, skb->len);
2287 ath10k_htt_rx_frag_handler(htt);
2288 break;
2289 }
2290 case HTT_T2H_MSG_TYPE_TEST:
2291 break;
2292 case HTT_T2H_MSG_TYPE_STATS_CONF:
2293 trace_ath10k_htt_stats(ar, skb->data, skb->len);
2294 break;
2295 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
2296 /* Firmware can return tx frames if it's unable to fully
2297 * process them and suspects the host may be able to fix them.
2298 * ath10k sends all tx frames as already inspected so this
2299 * shouldn't happen unless the firmware has a bug.
2300 */
2301 ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
2302 break;
2303 case HTT_T2H_MSG_TYPE_RX_ADDBA:
2304 ath10k_htt_rx_addba(ar, resp);
2305 break;
2306 case HTT_T2H_MSG_TYPE_RX_DELBA:
2307 ath10k_htt_rx_delba(ar, resp);
2308 break;
2309 case HTT_T2H_MSG_TYPE_PKTLOG: {
2310 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
2311 skb->len -
2312 offsetof(struct htt_resp,
2313 pktlog_msg.payload));
2314 break;
2315 }
2316 case HTT_T2H_MSG_TYPE_RX_FLUSH: {
2317 /* Ignore this event because mac80211 takes care of Rx
2318 * aggregation reordering.
2319 */
2320 break;
2321 }
2322 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
2323 skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2324 tasklet_schedule(&htt->txrx_compl_task);
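/* Ownership of the skb has passed to rx_in_ord_compl_q; return
 * false so the caller doesn't free it.
 */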
2325 return false;
2326 }
2327 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
2328 break;
2329 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
2330 u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
2331 u32 freq = __le32_to_cpu(resp->chan_change.freq);
2332
2333 ar->tgt_oper_chan =
2334 __ieee80211_get_channel(ar->hw->wiphy, freq);
2335 ath10k_dbg(ar, ATH10K_DBG_HTT,
2336 "htt chan change freq %u phymode %s\n",
2337 freq, ath10k_wmi_phymode_str(phymode));
2338 break;
2339 }
2340 case HTT_T2H_MSG_TYPE_AGGR_CONF:
2341 break;
2342 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
2343 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
2344
2345 if (!tx_fetch_ind) {
2346 ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
2347 break;
2348 }
2349 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
2350 tasklet_schedule(&htt->txrx_compl_task);
2351 break;
2352 }
2353 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
2354 ath10k_htt_rx_tx_fetch_confirm(ar, skb);
2355 break;
2356 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
2357 ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
2358 break;
2359 case HTT_T2H_MSG_TYPE_EN_STATS:
2360 default:
2361 ath10k_warn(ar, "htt event (%d) not handled\n",
2362 resp->hdr.msg_type);
2363 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2364 skb->data, skb->len);
2365 break;
2366 }
2367 return true;
2368 }
2369 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
2370
2371 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
2372 struct sk_buff *skb)
2373 {
2374 trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
2375 dev_kfree_skb_any(skb);
2376 }
2377 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
2378
2379 static void ath10k_htt_txrx_compl_task(unsigned long ptr)
2380 {
2381 struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
2382 struct ath10k *ar = htt->ar;
2383 struct htt_tx_done tx_done = {};
2384 struct sk_buff_head rx_ind_q;
2385 struct sk_buff_head tx_ind_q;
2386 struct sk_buff *skb;
2387 unsigned long flags;
2388 int num_mpdus;
2389
2390 __skb_queue_head_init(&rx_ind_q);
2391 __skb_queue_head_init(&tx_ind_q);
2392
2393 spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
2394 skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
2395 spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
2396
2397 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
2398 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
2399 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
2400
2401 /* kfifo_get() is called only from this tasklet, so it's neatly
2402 * serialized. From the kfifo_get() documentation:
2403 * Note that with only one concurrent reader and one concurrent writer,
2404 * you don't need extra locking to use these macros.
2405 */
2406 while (kfifo_get(&htt->txdone_fifo, &tx_done))
2407 ath10k_txrx_tx_unref(htt, &tx_done);
2408
2409 while ((skb = __skb_dequeue(&tx_ind_q))) {
2410 ath10k_htt_rx_tx_fetch_ind(ar, skb);
2411 dev_kfree_skb_any(skb);
2412 }
2413
2414 num_mpdus = atomic_read(&htt->num_mpdus_ready);
2415
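/* Handle at most the MPDUs that were ready when the counter was
 * sampled; MPDUs arriving meanwhile are left for the next run.
 */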
2416 while (num_mpdus) {
2417 if (ath10k_htt_rx_handle_amsdu(htt))
2418 break;
2419
2420 num_mpdus--;
2421 atomic_dec(&htt->num_mpdus_ready);
2422 }
2423
2424 while ((skb = __skb_dequeue(&rx_ind_q))) {
2425 spin_lock_bh(&htt->rx_ring.lock);
2426 ath10k_htt_rx_in_ord_ind(ar, skb);
2427 spin_unlock_bh(&htt->rx_ring.lock);
2428 dev_kfree_skb_any(skb);
2429 }
2430
2431 ath10k_htt_rx_msdu_buff_replenish(htt);
2432 }