Commit | Line | Data |
---|---|---|
5e3dd157 KV |
1 | /* |
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | |
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | |
4 | * | |
5 | * Permission to use, copy, modify, and/or distribute this software for any | |
6 | * purpose with or without fee is hereby granted, provided that the above | |
7 | * copyright notice and this permission notice appear in all copies. | |
8 | * | |
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
16 | */ | |
17 | ||
edb8236d | 18 | #include "core.h" |
5e3dd157 KV |
19 | #include "htc.h" |
20 | #include "htt.h" | |
21 | #include "txrx.h" | |
22 | #include "debug.h" | |
a9bf0506 | 23 | #include "trace.h" |
aa5b4fbc | 24 | #include "mac.h" |
5e3dd157 KV |
25 | |
26 | #include <linux/log2.h> | |
27 | ||
28 | /* slightly larger than one large A-MPDU */ | |
29 | #define HTT_RX_RING_SIZE_MIN 128 | |
30 | ||
31 | /* roughly 20 ms @ 1 Gbps of 1500B MSDUs */ | |
32 | #define HTT_RX_RING_SIZE_MAX 2048 | |
33 | ||
34 | #define HTT_RX_AVG_FRM_BYTES 1000 | |
35 | ||
36 | /* ms, very conservative */ | |
37 | #define HTT_RX_HOST_LATENCY_MAX_MS 20 | |
38 | ||
39 | /* ms, conservative */ | |
40 | #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10 | |
41 | ||
42 | /* when under memory pressure rx ring refill may fail and needs a retry */ | |
43 | #define HTT_RX_RING_REFILL_RETRY_MS 50 | |
44 | ||
f6dc2095 | 45 | static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); |
6c5151a9 | 46 | static void ath10k_htt_txrx_compl_task(unsigned long ptr); |
f6dc2095 | 47 | |
5e3dd157 KV |
48 | static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt) |
49 | { | |
50 | int size; | |
51 | ||
52 | /* | |
53 | * It is expected that the host CPU will typically be able to | |
54 | * service the rx indication from one A-MPDU before the rx | |
55 | * indication from the subsequent A-MPDU happens, roughly 1-2 ms | |
56 | * later. However, the rx ring should be sized very conservatively, | |
57 | * to accomodate the worst reasonable delay before the host CPU | |
58 | * services a rx indication interrupt. | |
59 | * | |
60 | * The rx ring need not be kept full of empty buffers. In theory, | |
61 | * the htt host SW can dynamically track the low-water mark in the | |
62 | * rx ring, and dynamically adjust the level to which the rx ring | |
63 | * is filled with empty buffers, to dynamically meet the desired | |
64 | * low-water mark. | |
65 | * | |
66 | * In contrast, it's difficult to resize the rx ring itself, once | |
67 | * it's in use. Thus, the ring itself should be sized very | |
68 | * conservatively, while the degree to which the ring is filled | |
69 | * with empty buffers should be sized moderately conservatively. | |
70 | */ | |
71 | ||
72 | /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ | |
73 | size = | |
74 | htt->max_throughput_mbps + | |
75 | 1000 / | |
76 | (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS; | |
77 | ||
78 | if (size < HTT_RX_RING_SIZE_MIN) | |
79 | size = HTT_RX_RING_SIZE_MIN; | |
80 | ||
81 | if (size > HTT_RX_RING_SIZE_MAX) | |
82 | size = HTT_RX_RING_SIZE_MAX; | |
83 | ||
84 | size = roundup_pow_of_two(size); | |
85 | ||
86 | return size; | |
87 | } | |
88 | ||
89 | static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt) | |
90 | { | |
91 | int size; | |
92 | ||
93 | /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ | |
94 | size = | |
95 | htt->max_throughput_mbps * | |
96 | 1000 / | |
97 | (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS; | |
98 | ||
99 | /* | |
100 | * Make sure the fill level is at least 1 less than the ring size. | |
101 | * Leaving 1 element empty allows the SW to easily distinguish | |
102 | * between a full ring vs. an empty ring. | |
103 | */ | |
104 | if (size >= htt->rx_ring.size) | |
105 | size = htt->rx_ring.size - 1; | |
106 | ||
107 | return size; | |
108 | } | |
109 | ||
110 | static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) | |
111 | { | |
112 | struct sk_buff *skb; | |
113 | struct ath10k_skb_cb *cb; | |
114 | int i; | |
115 | ||
116 | for (i = 0; i < htt->rx_ring.fill_cnt; i++) { | |
117 | skb = htt->rx_ring.netbufs_ring[i]; | |
118 | cb = ATH10K_SKB_CB(skb); | |
119 | dma_unmap_single(htt->ar->dev, cb->paddr, | |
120 | skb->len + skb_tailroom(skb), | |
121 | DMA_FROM_DEVICE); | |
122 | dev_kfree_skb_any(skb); | |
123 | } | |
124 | ||
125 | htt->rx_ring.fill_cnt = 0; | |
126 | } | |
127 | ||
128 | static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) | |
129 | { | |
130 | struct htt_rx_desc *rx_desc; | |
131 | struct sk_buff *skb; | |
132 | dma_addr_t paddr; | |
133 | int ret = 0, idx; | |
134 | ||
8cc7f26c | 135 | idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); |
5e3dd157 KV |
136 | while (num > 0) { |
137 | skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); | |
138 | if (!skb) { | |
139 | ret = -ENOMEM; | |
140 | goto fail; | |
141 | } | |
142 | ||
143 | if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN)) | |
144 | skb_pull(skb, | |
145 | PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) - | |
146 | skb->data); | |
147 | ||
148 | /* Clear rx_desc attention word before posting to Rx ring */ | |
149 | rx_desc = (struct htt_rx_desc *)skb->data; | |
150 | rx_desc->attention.flags = __cpu_to_le32(0); | |
151 | ||
152 | paddr = dma_map_single(htt->ar->dev, skb->data, | |
153 | skb->len + skb_tailroom(skb), | |
154 | DMA_FROM_DEVICE); | |
155 | ||
156 | if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) { | |
157 | dev_kfree_skb_any(skb); | |
158 | ret = -ENOMEM; | |
159 | goto fail; | |
160 | } | |
161 | ||
162 | ATH10K_SKB_CB(skb)->paddr = paddr; | |
163 | htt->rx_ring.netbufs_ring[idx] = skb; | |
164 | htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr); | |
165 | htt->rx_ring.fill_cnt++; | |
166 | ||
167 | num--; | |
168 | idx++; | |
169 | idx &= htt->rx_ring.size_mask; | |
170 | } | |
171 | ||
172 | fail: | |
8cc7f26c | 173 | *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); |
5e3dd157 KV |
174 | return ret; |
175 | } | |
176 | ||
177 | static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) | |
178 | { | |
179 | lockdep_assert_held(&htt->rx_ring.lock); | |
180 | return __ath10k_htt_rx_ring_fill_n(htt, num); | |
181 | } | |
182 | ||
183 | static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) | |
184 | { | |
6e712d42 | 185 | int ret, num_deficit, num_to_fill; |
5e3dd157 | 186 | |
6e712d42 MK |
187 | /* Refilling the whole RX ring buffer proves to be a bad idea. The |
188 | * reason is RX may take up significant amount of CPU cycles and starve | |
189 | * other tasks, e.g. TX on an ethernet device while acting as a bridge | |
190 | * with ath10k wlan interface. This ended up with very poor performance | |
191 | * once CPU the host system was overwhelmed with RX on ath10k. | |
192 | * | |
193 | * By limiting the number of refills the replenishing occurs | |
194 | * progressively. This in turns makes use of the fact tasklets are | |
195 | * processed in FIFO order. This means actual RX processing can starve | |
196 | * out refilling. If there's not enough buffers on RX ring FW will not | |
197 | * report RX until it is refilled with enough buffers. This | |
198 | * automatically balances load wrt to CPU power. | |
199 | * | |
200 | * This probably comes at a cost of lower maximum throughput but | |
201 | * improves the avarage and stability. */ | |
5e3dd157 | 202 | spin_lock_bh(&htt->rx_ring.lock); |
6e712d42 MK |
203 | num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; |
204 | num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit); | |
205 | num_deficit -= num_to_fill; | |
5e3dd157 KV |
206 | ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill); |
207 | if (ret == -ENOMEM) { | |
208 | /* | |
209 | * Failed to fill it to the desired level - | |
210 | * we'll start a timer and try again next time. | |
211 | * As long as enough buffers are left in the ring for | |
212 | * another A-MPDU rx, no special recovery is needed. | |
213 | */ | |
214 | mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + | |
215 | msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); | |
6e712d42 MK |
216 | } else if (num_deficit > 0) { |
217 | tasklet_schedule(&htt->rx_replenish_task); | |
5e3dd157 KV |
218 | } |
219 | spin_unlock_bh(&htt->rx_ring.lock); | |
220 | } | |
221 | ||
222 | static void ath10k_htt_rx_ring_refill_retry(unsigned long arg) | |
223 | { | |
224 | struct ath10k_htt *htt = (struct ath10k_htt *)arg; | |
af762c0b | 225 | |
5e3dd157 KV |
226 | ath10k_htt_rx_msdu_buff_replenish(htt); |
227 | } | |
228 | ||
3e841fd0 | 229 | static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt) |
5e3dd157 | 230 | { |
3e841fd0 MK |
231 | struct sk_buff *skb; |
232 | int i; | |
233 | ||
234 | for (i = 0; i < htt->rx_ring.size; i++) { | |
235 | skb = htt->rx_ring.netbufs_ring[i]; | |
236 | if (!skb) | |
237 | continue; | |
238 | ||
239 | dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr, | |
240 | skb->len + skb_tailroom(skb), | |
241 | DMA_FROM_DEVICE); | |
242 | dev_kfree_skb_any(skb); | |
243 | htt->rx_ring.netbufs_ring[i] = NULL; | |
244 | } | |
245 | } | |
5e3dd157 | 246 | |
95bf21f9 | 247 | void ath10k_htt_rx_free(struct ath10k_htt *htt) |
3e841fd0 | 248 | { |
5e3dd157 | 249 | del_timer_sync(&htt->rx_ring.refill_retry_timer); |
6e712d42 | 250 | tasklet_kill(&htt->rx_replenish_task); |
6c5151a9 MK |
251 | tasklet_kill(&htt->txrx_compl_task); |
252 | ||
253 | skb_queue_purge(&htt->tx_compl_q); | |
254 | skb_queue_purge(&htt->rx_compl_q); | |
5e3dd157 | 255 | |
3e841fd0 | 256 | ath10k_htt_rx_ring_clean_up(htt); |
5e3dd157 KV |
257 | |
258 | dma_free_coherent(htt->ar->dev, | |
259 | (htt->rx_ring.size * | |
260 | sizeof(htt->rx_ring.paddrs_ring)), | |
261 | htt->rx_ring.paddrs_ring, | |
262 | htt->rx_ring.base_paddr); | |
263 | ||
264 | dma_free_coherent(htt->ar->dev, | |
265 | sizeof(*htt->rx_ring.alloc_idx.vaddr), | |
266 | htt->rx_ring.alloc_idx.vaddr, | |
267 | htt->rx_ring.alloc_idx.paddr); | |
268 | ||
269 | kfree(htt->rx_ring.netbufs_ring); | |
270 | } | |
271 | ||
272 | static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) | |
273 | { | |
7aa7a72a | 274 | struct ath10k *ar = htt->ar; |
5e3dd157 KV |
275 | int idx; |
276 | struct sk_buff *msdu; | |
277 | ||
45967089 | 278 | lockdep_assert_held(&htt->rx_ring.lock); |
5e3dd157 | 279 | |
8d60ee87 | 280 | if (htt->rx_ring.fill_cnt == 0) { |
7aa7a72a | 281 | ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n"); |
8d60ee87 MK |
282 | return NULL; |
283 | } | |
5e3dd157 KV |
284 | |
285 | idx = htt->rx_ring.sw_rd_idx.msdu_payld; | |
286 | msdu = htt->rx_ring.netbufs_ring[idx]; | |
3e841fd0 | 287 | htt->rx_ring.netbufs_ring[idx] = NULL; |
5e3dd157 KV |
288 | |
289 | idx++; | |
290 | idx &= htt->rx_ring.size_mask; | |
291 | htt->rx_ring.sw_rd_idx.msdu_payld = idx; | |
292 | htt->rx_ring.fill_cnt--; | |
293 | ||
4de02806 MK |
294 | dma_unmap_single(htt->ar->dev, |
295 | ATH10K_SKB_CB(msdu)->paddr, | |
296 | msdu->len + skb_tailroom(msdu), | |
297 | DMA_FROM_DEVICE); | |
298 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", | |
299 | msdu->data, msdu->len + skb_tailroom(msdu)); | |
4de02806 | 300 | |
5e3dd157 KV |
301 | return msdu; |
302 | } | |
303 | ||
d84dd60f | 304 | /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ |
5e3dd157 KV |
305 | static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, |
306 | u8 **fw_desc, int *fw_desc_len, | |
f0e2770f | 307 | struct sk_buff_head *amsdu) |
5e3dd157 | 308 | { |
7aa7a72a | 309 | struct ath10k *ar = htt->ar; |
5e3dd157 | 310 | int msdu_len, msdu_chaining = 0; |
9aa505d2 | 311 | struct sk_buff *msdu; |
5e3dd157 KV |
312 | struct htt_rx_desc *rx_desc; |
313 | ||
45967089 MK |
314 | lockdep_assert_held(&htt->rx_ring.lock); |
315 | ||
9aa505d2 | 316 | for (;;) { |
5e3dd157 KV |
317 | int last_msdu, msdu_len_invalid, msdu_chained; |
318 | ||
9aa505d2 MK |
319 | msdu = ath10k_htt_rx_netbuf_pop(htt); |
320 | if (!msdu) { | |
9aa505d2 | 321 | __skb_queue_purge(amsdu); |
e0bd7513 | 322 | return -ENOENT; |
9aa505d2 MK |
323 | } |
324 | ||
325 | __skb_queue_tail(amsdu, msdu); | |
326 | ||
5e3dd157 KV |
327 | rx_desc = (struct htt_rx_desc *)msdu->data; |
328 | ||
329 | /* FIXME: we must report msdu payload since this is what caller | |
330 | * expects now */ | |
331 | skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload)); | |
332 | skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload)); | |
333 | ||
334 | /* | |
335 | * Sanity check - confirm the HW is finished filling in the | |
336 | * rx data. | |
337 | * If the HW and SW are working correctly, then it's guaranteed | |
338 | * that the HW's MAC DMA is done before this point in the SW. | |
339 | * To prevent the case that we handle a stale Rx descriptor, | |
340 | * just assert for now until we have a way to recover. | |
341 | */ | |
342 | if (!(__le32_to_cpu(rx_desc->attention.flags) | |
343 | & RX_ATTENTION_FLAGS_MSDU_DONE)) { | |
9aa505d2 | 344 | __skb_queue_purge(amsdu); |
e0bd7513 | 345 | return -EIO; |
5e3dd157 KV |
346 | } |
347 | ||
348 | /* | |
349 | * Copy the FW rx descriptor for this MSDU from the rx | |
350 | * indication message into the MSDU's netbuf. HL uses the | |
351 | * same rx indication message definition as LL, and simply | |
352 | * appends new info (fields from the HW rx desc, and the | |
353 | * MSDU payload itself). So, the offset into the rx | |
354 | * indication message only has to account for the standard | |
355 | * offset of the per-MSDU FW rx desc info within the | |
356 | * message, and how many bytes of the per-MSDU FW rx desc | |
357 | * info have already been consumed. (And the endianness of | |
358 | * the host, since for a big-endian host, the rx ind | |
359 | * message contents, including the per-MSDU rx desc bytes, | |
360 | * were byteswapped during upload.) | |
361 | */ | |
362 | if (*fw_desc_len > 0) { | |
363 | rx_desc->fw_desc.info0 = **fw_desc; | |
364 | /* | |
365 | * The target is expected to only provide the basic | |
366 | * per-MSDU rx descriptors. Just to be sure, verify | |
367 | * that the target has not attached extension data | |
368 | * (e.g. LRO flow ID). | |
369 | */ | |
370 | ||
371 | /* or more, if there's extension data */ | |
372 | (*fw_desc)++; | |
373 | (*fw_desc_len)--; | |
374 | } else { | |
375 | /* | |
376 | * When an oversized AMSDU happened, FW will lost | |
377 | * some of MSDU status - in this case, the FW | |
378 | * descriptors provided will be less than the | |
379 | * actual MSDUs inside this MPDU. Mark the FW | |
380 | * descriptors so that it will still deliver to | |
381 | * upper stack, if no CRC error for this MPDU. | |
382 | * | |
383 | * FIX THIS - the FW descriptors are actually for | |
384 | * MSDUs in the end of this A-MSDU instead of the | |
385 | * beginning. | |
386 | */ | |
387 | rx_desc->fw_desc.info0 = 0; | |
388 | } | |
389 | ||
390 | msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags) | |
391 | & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | | |
392 | RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); | |
393 | msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0), | |
394 | RX_MSDU_START_INFO0_MSDU_LENGTH); | |
395 | msdu_chained = rx_desc->frag_info.ring2_more_count; | |
396 | ||
397 | if (msdu_len_invalid) | |
398 | msdu_len = 0; | |
399 | ||
400 | skb_trim(msdu, 0); | |
401 | skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE)); | |
402 | msdu_len -= msdu->len; | |
403 | ||
9aa505d2 | 404 | /* Note: Chained buffers do not contain rx descriptor */ |
5e3dd157 | 405 | while (msdu_chained--) { |
9aa505d2 MK |
406 | msdu = ath10k_htt_rx_netbuf_pop(htt); |
407 | if (!msdu) { | |
9aa505d2 | 408 | __skb_queue_purge(amsdu); |
e0bd7513 | 409 | return -ENOENT; |
b30595ae MK |
410 | } |
411 | ||
9aa505d2 MK |
412 | __skb_queue_tail(amsdu, msdu); |
413 | skb_trim(msdu, 0); | |
414 | skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE)); | |
415 | msdu_len -= msdu->len; | |
ede9c8e0 | 416 | msdu_chaining = 1; |
5e3dd157 KV |
417 | } |
418 | ||
5e3dd157 KV |
419 | last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & |
420 | RX_MSDU_END_INFO0_LAST_MSDU; | |
421 | ||
b04e204f | 422 | trace_ath10k_htt_rx_desc(ar, &rx_desc->attention, |
a0883cf7 | 423 | sizeof(*rx_desc) - sizeof(u32)); |
d8bb26b9 | 424 | |
9aa505d2 MK |
425 | if (last_msdu) |
426 | break; | |
5e3dd157 | 427 | } |
5e3dd157 | 428 | |
9aa505d2 | 429 | if (skb_queue_empty(amsdu)) |
d84dd60f JD |
430 | msdu_chaining = -1; |
431 | ||
5e3dd157 KV |
432 | /* |
433 | * Don't refill the ring yet. | |
434 | * | |
435 | * First, the elements popped here are still in use - it is not | |
436 | * safe to overwrite them until the matching call to | |
437 | * mpdu_desc_list_next. Second, for efficiency it is preferable to | |
438 | * refill the rx ring with 1 PPDU's worth of rx buffers (something | |
439 | * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers | |
440 | * (something like 3 buffers). Consequently, we'll rely on the txrx | |
441 | * SW to tell us when it is done pulling all the PPDU's rx buffers | |
442 | * out of the rx ring, and then refill it just once. | |
443 | */ | |
444 | ||
445 | return msdu_chaining; | |
446 | } | |
447 | ||
6e712d42 MK |
448 | static void ath10k_htt_rx_replenish_task(unsigned long ptr) |
449 | { | |
450 | struct ath10k_htt *htt = (struct ath10k_htt *)ptr; | |
af762c0b | 451 | |
6e712d42 MK |
452 | ath10k_htt_rx_msdu_buff_replenish(htt); |
453 | } | |
454 | ||
95bf21f9 | 455 | int ath10k_htt_rx_alloc(struct ath10k_htt *htt) |
5e3dd157 | 456 | { |
7aa7a72a | 457 | struct ath10k *ar = htt->ar; |
5e3dd157 KV |
458 | dma_addr_t paddr; |
459 | void *vaddr; | |
bd8bdbb6 | 460 | size_t size; |
5e3dd157 KV |
461 | struct timer_list *timer = &htt->rx_ring.refill_retry_timer; |
462 | ||
51fc7d74 MK |
463 | htt->rx_confused = false; |
464 | ||
5e3dd157 KV |
465 | htt->rx_ring.size = ath10k_htt_rx_ring_size(htt); |
466 | if (!is_power_of_2(htt->rx_ring.size)) { | |
7aa7a72a | 467 | ath10k_warn(ar, "htt rx ring size is not power of 2\n"); |
5e3dd157 KV |
468 | return -EINVAL; |
469 | } | |
470 | ||
471 | htt->rx_ring.size_mask = htt->rx_ring.size - 1; | |
472 | ||
473 | /* | |
474 | * Set the initial value for the level to which the rx ring | |
475 | * should be filled, based on the max throughput and the | |
476 | * worst likely latency for the host to fill the rx ring | |
477 | * with new buffers. In theory, this fill level can be | |
478 | * dynamically adjusted from the initial value set here, to | |
479 | * reflect the actual host latency rather than a | |
480 | * conservative assumption about the host latency. | |
481 | */ | |
482 | htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt); | |
483 | ||
484 | htt->rx_ring.netbufs_ring = | |
3e841fd0 | 485 | kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *), |
5e3dd157 KV |
486 | GFP_KERNEL); |
487 | if (!htt->rx_ring.netbufs_ring) | |
488 | goto err_netbuf; | |
489 | ||
bd8bdbb6 KV |
490 | size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring); |
491 | ||
492 | vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA); | |
5e3dd157 KV |
493 | if (!vaddr) |
494 | goto err_dma_ring; | |
495 | ||
496 | htt->rx_ring.paddrs_ring = vaddr; | |
497 | htt->rx_ring.base_paddr = paddr; | |
498 | ||
499 | vaddr = dma_alloc_coherent(htt->ar->dev, | |
500 | sizeof(*htt->rx_ring.alloc_idx.vaddr), | |
501 | &paddr, GFP_DMA); | |
502 | if (!vaddr) | |
503 | goto err_dma_idx; | |
504 | ||
505 | htt->rx_ring.alloc_idx.vaddr = vaddr; | |
506 | htt->rx_ring.alloc_idx.paddr = paddr; | |
507 | htt->rx_ring.sw_rd_idx.msdu_payld = 0; | |
508 | *htt->rx_ring.alloc_idx.vaddr = 0; | |
509 | ||
510 | /* Initialize the Rx refill retry timer */ | |
511 | setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt); | |
512 | ||
513 | spin_lock_init(&htt->rx_ring.lock); | |
514 | ||
515 | htt->rx_ring.fill_cnt = 0; | |
516 | if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level)) | |
517 | goto err_fill_ring; | |
518 | ||
6e712d42 MK |
519 | tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task, |
520 | (unsigned long)htt); | |
521 | ||
6c5151a9 MK |
522 | skb_queue_head_init(&htt->tx_compl_q); |
523 | skb_queue_head_init(&htt->rx_compl_q); | |
524 | ||
525 | tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, | |
526 | (unsigned long)htt); | |
527 | ||
7aa7a72a | 528 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", |
5e3dd157 KV |
529 | htt->rx_ring.size, htt->rx_ring.fill_level); |
530 | return 0; | |
531 | ||
532 | err_fill_ring: | |
533 | ath10k_htt_rx_ring_free(htt); | |
534 | dma_free_coherent(htt->ar->dev, | |
535 | sizeof(*htt->rx_ring.alloc_idx.vaddr), | |
536 | htt->rx_ring.alloc_idx.vaddr, | |
537 | htt->rx_ring.alloc_idx.paddr); | |
538 | err_dma_idx: | |
539 | dma_free_coherent(htt->ar->dev, | |
540 | (htt->rx_ring.size * | |
541 | sizeof(htt->rx_ring.paddrs_ring)), | |
542 | htt->rx_ring.paddrs_ring, | |
543 | htt->rx_ring.base_paddr); | |
544 | err_dma_ring: | |
545 | kfree(htt->rx_ring.netbufs_ring); | |
546 | err_netbuf: | |
547 | return -ENOMEM; | |
548 | } | |
549 | ||
7aa7a72a MK |
550 | static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, |
551 | enum htt_rx_mpdu_encrypt_type type) | |
5e3dd157 KV |
552 | { |
553 | switch (type) { | |
890d3b2a MK |
554 | case HTT_RX_MPDU_ENCRYPT_NONE: |
555 | return 0; | |
5e3dd157 KV |
556 | case HTT_RX_MPDU_ENCRYPT_WEP40: |
557 | case HTT_RX_MPDU_ENCRYPT_WEP104: | |
890d3b2a | 558 | return IEEE80211_WEP_IV_LEN; |
5e3dd157 | 559 | case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: |
5e3dd157 | 560 | case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: |
890d3b2a | 561 | return IEEE80211_TKIP_IV_LEN; |
5e3dd157 | 562 | case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: |
890d3b2a MK |
563 | return IEEE80211_CCMP_HDR_LEN; |
564 | case HTT_RX_MPDU_ENCRYPT_WEP128: | |
565 | case HTT_RX_MPDU_ENCRYPT_WAPI: | |
566 | break; | |
5e3dd157 KV |
567 | } |
568 | ||
890d3b2a | 569 | ath10k_warn(ar, "unsupported encryption type %d\n", type); |
5e3dd157 KV |
570 | return 0; |
571 | } | |
572 | ||
890d3b2a MK |
573 | #define MICHAEL_MIC_LEN 8 |
574 | ||
7aa7a72a MK |
575 | static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar, |
576 | enum htt_rx_mpdu_encrypt_type type) | |
5e3dd157 KV |
577 | { |
578 | switch (type) { | |
579 | case HTT_RX_MPDU_ENCRYPT_NONE: | |
890d3b2a | 580 | return 0; |
5e3dd157 KV |
581 | case HTT_RX_MPDU_ENCRYPT_WEP40: |
582 | case HTT_RX_MPDU_ENCRYPT_WEP104: | |
890d3b2a | 583 | return IEEE80211_WEP_ICV_LEN; |
5e3dd157 KV |
584 | case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: |
585 | case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: | |
890d3b2a | 586 | return IEEE80211_TKIP_ICV_LEN; |
5e3dd157 | 587 | case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: |
890d3b2a MK |
588 | return IEEE80211_CCMP_MIC_LEN; |
589 | case HTT_RX_MPDU_ENCRYPT_WEP128: | |
590 | case HTT_RX_MPDU_ENCRYPT_WAPI: | |
591 | break; | |
5e3dd157 KV |
592 | } |
593 | ||
890d3b2a | 594 | ath10k_warn(ar, "unsupported encryption type %d\n", type); |
5e3dd157 KV |
595 | return 0; |
596 | } | |
597 | ||
f6dc2095 MK |
598 | struct rfc1042_hdr { |
599 | u8 llc_dsap; | |
600 | u8 llc_ssap; | |
601 | u8 llc_ctrl; | |
602 | u8 snap_oui[3]; | |
603 | __be16 snap_type; | |
604 | } __packed; | |
605 | ||
606 | struct amsdu_subframe_hdr { | |
607 | u8 dst[ETH_ALEN]; | |
608 | u8 src[ETH_ALEN]; | |
609 | __be16 len; | |
610 | } __packed; | |
611 | ||
73539b40 JD |
612 | static const u8 rx_legacy_rate_idx[] = { |
613 | 3, /* 0x00 - 11Mbps */ | |
614 | 2, /* 0x01 - 5.5Mbps */ | |
615 | 1, /* 0x02 - 2Mbps */ | |
616 | 0, /* 0x03 - 1Mbps */ | |
617 | 3, /* 0x04 - 11Mbps */ | |
618 | 2, /* 0x05 - 5.5Mbps */ | |
619 | 1, /* 0x06 - 2Mbps */ | |
620 | 0, /* 0x07 - 1Mbps */ | |
621 | 10, /* 0x08 - 48Mbps */ | |
622 | 8, /* 0x09 - 24Mbps */ | |
623 | 6, /* 0x0A - 12Mbps */ | |
624 | 4, /* 0x0B - 6Mbps */ | |
625 | 11, /* 0x0C - 54Mbps */ | |
626 | 9, /* 0x0D - 36Mbps */ | |
627 | 7, /* 0x0E - 18Mbps */ | |
628 | 5, /* 0x0F - 9Mbps */ | |
629 | }; | |
630 | ||
87326c97 | 631 | static void ath10k_htt_rx_h_rates(struct ath10k *ar, |
b9fd8a84 MK |
632 | struct ieee80211_rx_status *status, |
633 | struct htt_rx_desc *rxd) | |
73539b40 | 634 | { |
b9fd8a84 | 635 | enum ieee80211_band band; |
73539b40 | 636 | u8 cck, rate, rate_idx, bw, sgi, mcs, nss; |
73539b40 | 637 | u8 preamble = 0; |
b9fd8a84 | 638 | u32 info1, info2, info3; |
73539b40 | 639 | |
b9fd8a84 MK |
640 | /* Band value can't be set as undefined but freq can be 0 - use that to |
641 | * determine whether band is provided. | |
642 | * | |
643 | * FIXME: Perhaps this can go away if CCK rate reporting is a little | |
644 | * reworked? | |
645 | */ | |
646 | if (!status->freq) | |
73539b40 JD |
647 | return; |
648 | ||
b9fd8a84 MK |
649 | band = status->band; |
650 | info1 = __le32_to_cpu(rxd->ppdu_start.info1); | |
651 | info2 = __le32_to_cpu(rxd->ppdu_start.info2); | |
652 | info3 = __le32_to_cpu(rxd->ppdu_start.info3); | |
653 | ||
654 | preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE); | |
73539b40 JD |
655 | |
656 | switch (preamble) { | |
657 | case HTT_RX_LEGACY: | |
b9fd8a84 MK |
658 | cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT; |
659 | rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE); | |
73539b40 JD |
660 | rate_idx = 0; |
661 | ||
662 | if (rate < 0x08 || rate > 0x0F) | |
663 | break; | |
664 | ||
665 | switch (band) { | |
666 | case IEEE80211_BAND_2GHZ: | |
667 | if (cck) | |
668 | rate &= ~BIT(3); | |
669 | rate_idx = rx_legacy_rate_idx[rate]; | |
670 | break; | |
671 | case IEEE80211_BAND_5GHZ: | |
672 | rate_idx = rx_legacy_rate_idx[rate]; | |
673 | /* We are using same rate table registering | |
674 | HW - ath10k_rates[]. In case of 5GHz skip | |
675 | CCK rates, so -4 here */ | |
676 | rate_idx -= 4; | |
677 | break; | |
678 | default: | |
679 | break; | |
680 | } | |
681 | ||
682 | status->rate_idx = rate_idx; | |
683 | break; | |
684 | case HTT_RX_HT: | |
685 | case HTT_RX_HT_WITH_TXBF: | |
b9fd8a84 MK |
686 | /* HT-SIG - Table 20-11 in info2 and info3 */ |
687 | mcs = info2 & 0x1F; | |
73539b40 | 688 | nss = mcs >> 3; |
b9fd8a84 MK |
689 | bw = (info2 >> 7) & 1; |
690 | sgi = (info3 >> 7) & 1; | |
73539b40 JD |
691 | |
692 | status->rate_idx = mcs; | |
693 | status->flag |= RX_FLAG_HT; | |
694 | if (sgi) | |
695 | status->flag |= RX_FLAG_SHORT_GI; | |
696 | if (bw) | |
697 | status->flag |= RX_FLAG_40MHZ; | |
698 | break; | |
699 | case HTT_RX_VHT: | |
700 | case HTT_RX_VHT_WITH_TXBF: | |
b9fd8a84 | 701 | /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 |
73539b40 | 702 | TODO check this */ |
b9fd8a84 MK |
703 | mcs = (info3 >> 4) & 0x0F; |
704 | nss = ((info2 >> 10) & 0x07) + 1; | |
705 | bw = info2 & 3; | |
706 | sgi = info3 & 1; | |
73539b40 JD |
707 | |
708 | status->rate_idx = mcs; | |
709 | status->vht_nss = nss; | |
710 | ||
711 | if (sgi) | |
712 | status->flag |= RX_FLAG_SHORT_GI; | |
713 | ||
714 | switch (bw) { | |
715 | /* 20MHZ */ | |
716 | case 0: | |
717 | break; | |
718 | /* 40MHZ */ | |
719 | case 1: | |
720 | status->flag |= RX_FLAG_40MHZ; | |
721 | break; | |
722 | /* 80MHZ */ | |
723 | case 2: | |
724 | status->vht_flag |= RX_VHT_FLAG_80MHZ; | |
725 | } | |
726 | ||
727 | status->flag |= RX_FLAG_VHT; | |
728 | break; | |
729 | default: | |
730 | break; | |
731 | } | |
732 | } | |
733 | ||
36653f05 JD |
734 | static bool ath10k_htt_rx_h_channel(struct ath10k *ar, |
735 | struct ieee80211_rx_status *status) | |
736 | { | |
737 | struct ieee80211_channel *ch; | |
738 | ||
739 | spin_lock_bh(&ar->data_lock); | |
740 | ch = ar->scan_channel; | |
741 | if (!ch) | |
742 | ch = ar->rx_channel; | |
743 | spin_unlock_bh(&ar->data_lock); | |
744 | ||
745 | if (!ch) | |
746 | return false; | |
747 | ||
748 | status->band = ch->band; | |
749 | status->freq = ch->center_freq; | |
750 | ||
751 | return true; | |
752 | } | |
753 | ||
b9fd8a84 MK |
754 | static void ath10k_htt_rx_h_signal(struct ath10k *ar, |
755 | struct ieee80211_rx_status *status, | |
756 | struct htt_rx_desc *rxd) | |
757 | { | |
758 | /* FIXME: Get real NF */ | |
759 | status->signal = ATH10K_DEFAULT_NOISE_FLOOR + | |
760 | rxd->ppdu_start.rssi_comb; | |
761 | status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; | |
762 | } | |
763 | ||
764 | static void ath10k_htt_rx_h_mactime(struct ath10k *ar, | |
765 | struct ieee80211_rx_status *status, | |
766 | struct htt_rx_desc *rxd) | |
767 | { | |
768 | /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This | |
769 | * means all prior MSDUs in a PPDU are reported to mac80211 without the | |
770 | * TSF. Is it worth holding frames until end of PPDU is known? | |
771 | * | |
772 | * FIXME: Can we get/compute 64bit TSF? | |
773 | */ | |
774 | status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp); | |
775 | status->flag |= RX_FLAG_MACTIME_END; | |
776 | } | |
777 | ||
778 | static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, | |
779 | struct sk_buff_head *amsdu, | |
780 | struct ieee80211_rx_status *status) | |
781 | { | |
782 | struct sk_buff *first; | |
783 | struct htt_rx_desc *rxd; | |
784 | bool is_first_ppdu; | |
785 | bool is_last_ppdu; | |
786 | ||
787 | if (skb_queue_empty(amsdu)) | |
788 | return; | |
789 | ||
790 | first = skb_peek(amsdu); | |
791 | rxd = (void *)first->data - sizeof(*rxd); | |
792 | ||
793 | is_first_ppdu = !!(rxd->attention.flags & | |
794 | __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU)); | |
795 | is_last_ppdu = !!(rxd->attention.flags & | |
796 | __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU)); | |
797 | ||
798 | if (is_first_ppdu) { | |
799 | /* New PPDU starts so clear out the old per-PPDU status. */ | |
800 | status->freq = 0; | |
801 | status->rate_idx = 0; | |
802 | status->vht_nss = 0; | |
803 | status->vht_flag &= ~RX_VHT_FLAG_80MHZ; | |
804 | status->flag &= ~(RX_FLAG_HT | | |
805 | RX_FLAG_VHT | | |
806 | RX_FLAG_SHORT_GI | | |
807 | RX_FLAG_40MHZ | | |
808 | RX_FLAG_MACTIME_END); | |
809 | status->flag |= RX_FLAG_NO_SIGNAL_VAL; | |
810 | ||
811 | ath10k_htt_rx_h_signal(ar, status, rxd); | |
812 | ath10k_htt_rx_h_channel(ar, status); | |
813 | ath10k_htt_rx_h_rates(ar, status, rxd); | |
814 | } | |
815 | ||
816 | if (is_last_ppdu) | |
817 | ath10k_htt_rx_h_mactime(ar, status, rxd); | |
818 | } | |
819 | ||
76f5329a JD |
820 | static const char * const tid_to_ac[] = { |
821 | "BE", | |
822 | "BK", | |
823 | "BK", | |
824 | "BE", | |
825 | "VI", | |
826 | "VI", | |
827 | "VO", | |
828 | "VO", | |
829 | }; | |
830 | ||
831 | static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size) | |
832 | { | |
833 | u8 *qc; | |
834 | int tid; | |
835 | ||
836 | if (!ieee80211_is_data_qos(hdr->frame_control)) | |
837 | return ""; | |
838 | ||
839 | qc = ieee80211_get_qos_ctl(hdr); | |
840 | tid = *qc & IEEE80211_QOS_CTL_TID_MASK; | |
841 | if (tid < 8) | |
842 | snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]); | |
843 | else | |
844 | snprintf(out, size, "tid %d", tid); | |
845 | ||
846 | return out; | |
847 | } | |
848 | ||
85f6d7cf JD |
849 | static void ath10k_process_rx(struct ath10k *ar, |
850 | struct ieee80211_rx_status *rx_status, | |
851 | struct sk_buff *skb) | |
73539b40 JD |
852 | { |
853 | struct ieee80211_rx_status *status; | |
76f5329a JD |
854 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
855 | char tid[32]; | |
73539b40 | 856 | |
85f6d7cf JD |
857 | status = IEEE80211_SKB_RXCB(skb); |
858 | *status = *rx_status; | |
73539b40 | 859 | |
7aa7a72a | 860 | ath10k_dbg(ar, ATH10K_DBG_DATA, |
76f5329a | 861 | "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", |
85f6d7cf JD |
862 | skb, |
863 | skb->len, | |
76f5329a JD |
864 | ieee80211_get_SA(hdr), |
865 | ath10k_get_tid(hdr, tid, sizeof(tid)), | |
866 | is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? | |
867 | "mcast" : "ucast", | |
868 | (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, | |
73539b40 JD |
869 | status->flag == 0 ? "legacy" : "", |
870 | status->flag & RX_FLAG_HT ? "ht" : "", | |
871 | status->flag & RX_FLAG_VHT ? "vht" : "", | |
872 | status->flag & RX_FLAG_40MHZ ? "40" : "", | |
873 | status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "", | |
874 | status->flag & RX_FLAG_SHORT_GI ? "sgi " : "", | |
875 | status->rate_idx, | |
876 | status->vht_nss, | |
877 | status->freq, | |
87326c97 | 878 | status->band, status->flag, |
78433f96 | 879 | !!(status->flag & RX_FLAG_FAILED_FCS_CRC), |
76f5329a JD |
880 | !!(status->flag & RX_FLAG_MMIC_ERROR), |
881 | !!(status->flag & RX_FLAG_AMSDU_MORE)); | |
7aa7a72a | 882 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", |
85f6d7cf | 883 | skb->data, skb->len); |
5ce8e7fd RM |
884 | trace_ath10k_rx_hdr(ar, skb->data, skb->len); |
885 | trace_ath10k_rx_payload(ar, skb->data, skb->len); | |
73539b40 | 886 | |
85f6d7cf | 887 | ieee80211_rx(ar->hw, skb); |
73539b40 JD |
888 | } |
889 | ||
d960c369 MK |
890 | static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr) |
891 | { | |
892 | /* nwifi header is padded to 4 bytes. this fixes 4addr rx */ | |
893 | return round_up(ieee80211_hdrlen(hdr->frame_control), 4); | |
894 | } | |
895 | ||
581c25f8 MK |
896 | static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, |
897 | struct sk_buff *msdu, | |
898 | struct ieee80211_rx_status *status, | |
899 | enum htt_rx_mpdu_encrypt_type enctype, | |
900 | bool is_decrypted) | |
5e3dd157 | 901 | { |
581c25f8 | 902 | struct ieee80211_hdr *hdr; |
5e3dd157 | 903 | struct htt_rx_desc *rxd; |
581c25f8 MK |
904 | size_t hdr_len; |
905 | size_t crypto_len; | |
906 | bool is_first; | |
907 | bool is_last; | |
908 | ||
909 | rxd = (void *)msdu->data - sizeof(*rxd); | |
910 | is_first = !!(rxd->msdu_end.info0 & | |
911 | __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); | |
912 | is_last = !!(rxd->msdu_end.info0 & | |
913 | __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); | |
914 | ||
915 | /* Delivered decapped frame: | |
916 | * [802.11 header] | |
917 | * [crypto param] <-- can be trimmed if !fcs_err && | |
918 | * !decrypt_err && !peer_idx_invalid | |
919 | * [amsdu header] <-- only if A-MSDU | |
920 | * [rfc1042/llc] | |
921 | * [payload] | |
922 | * [FCS] <-- at end, needs to be trimmed | |
923 | */ | |
924 | ||
925 | /* This probably shouldn't happen but warn just in case */ | |
926 | if (unlikely(WARN_ON_ONCE(!is_first))) | |
927 | return; | |
928 | ||
929 | /* This probably shouldn't happen but warn just in case */ | |
930 | if (unlikely(WARN_ON_ONCE(!(is_first && is_last)))) | |
931 | return; | |
932 | ||
933 | skb_trim(msdu, msdu->len - FCS_LEN); | |
934 | ||
935 | /* In most cases this will be true for sniffed frames. It makes sense | |
936 | * to deliver them as-is without stripping the crypto param. This would | |
937 | * also make sense for software based decryption (which is not | |
938 | * implemented in ath10k). | |
939 | * | |
940 | * If there's no error then the frame is decrypted. At least that is | |
941 | * the case for frames that come in via fragmented rx indication. | |
942 | */ | |
943 | if (!is_decrypted) | |
944 | return; | |
945 | ||
946 | /* The payload is decrypted so strip crypto params. Start from tail | |
947 | * since hdr is used to compute some stuff. | |
948 | */ | |
949 | ||
950 | hdr = (void *)msdu->data; | |
951 | ||
952 | /* Tail */ | |
953 | skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype)); | |
954 | ||
955 | /* MMIC */ | |
956 | if (!ieee80211_has_morefrags(hdr->frame_control) && | |
957 | enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) | |
958 | skb_trim(msdu, msdu->len - 8); | |
959 | ||
960 | /* Head */ | |
961 | hdr_len = ieee80211_hdrlen(hdr->frame_control); | |
962 | crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); | |
963 | ||
964 | memmove((void *)msdu->data + crypto_len, | |
965 | (void *)msdu->data, hdr_len); | |
966 | skb_pull(msdu, crypto_len); | |
967 | } | |
968 | ||
969 | static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, | |
970 | struct sk_buff *msdu, | |
971 | struct ieee80211_rx_status *status, | |
972 | const u8 first_hdr[64]) | |
973 | { | |
f6dc2095 | 974 | struct ieee80211_hdr *hdr; |
581c25f8 MK |
975 | size_t hdr_len; |
976 | u8 da[ETH_ALEN]; | |
977 | u8 sa[ETH_ALEN]; | |
5e3dd157 | 978 | |
581c25f8 MK |
979 | /* Delivered decapped frame: |
980 | * [nwifi 802.11 header] <-- replaced with 802.11 hdr | |
981 | * [rfc1042/llc] | |
982 | * | |
983 | * Note: The nwifi header doesn't have QoS Control and is | |
984 | * (always?) a 3addr frame. | |
985 | * | |
986 | * Note2: There's no A-MSDU subframe header. Even if it's part | |
987 | * of an A-MSDU. | |
988 | */ | |
9aa505d2 | 989 | |
581c25f8 MK |
990 | /* pull decapped header and copy SA & DA */ |
991 | hdr = (struct ieee80211_hdr *)msdu->data; | |
992 | hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); | |
993 | ether_addr_copy(da, ieee80211_get_DA(hdr)); | |
994 | ether_addr_copy(sa, ieee80211_get_SA(hdr)); | |
995 | skb_pull(msdu, hdr_len); | |
5e3dd157 | 996 | |
581c25f8 MK |
997 | /* push original 802.11 header */ |
998 | hdr = (struct ieee80211_hdr *)first_hdr; | |
f6dc2095 | 999 | hdr_len = ieee80211_hdrlen(hdr->frame_control); |
581c25f8 | 1000 | memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); |
5e3dd157 | 1001 | |
581c25f8 MK |
1002 | /* original 802.11 header has a different DA and in |
1003 | * case of 4addr it may also have different SA | |
1004 | */ | |
1005 | hdr = (struct ieee80211_hdr *)msdu->data; | |
1006 | ether_addr_copy(ieee80211_get_DA(hdr), da); | |
1007 | ether_addr_copy(ieee80211_get_SA(hdr), sa); | |
1008 | } | |
5e3dd157 | 1009 | |
581c25f8 MK |
1010 | static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, |
1011 | struct sk_buff *msdu, | |
1012 | enum htt_rx_mpdu_encrypt_type enctype) | |
1013 | { | |
1014 | struct ieee80211_hdr *hdr; | |
1015 | struct htt_rx_desc *rxd; | |
1016 | size_t hdr_len, crypto_len; | |
1017 | void *rfc1042; | |
1018 | bool is_first, is_last, is_amsdu; | |
e3fbf8d2 | 1019 | |
581c25f8 MK |
1020 | rxd = (void *)msdu->data - sizeof(*rxd); |
1021 | hdr = (void *)rxd->rx_hdr_status; | |
f6dc2095 | 1022 | |
581c25f8 MK |
1023 | is_first = !!(rxd->msdu_end.info0 & |
1024 | __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); | |
1025 | is_last = !!(rxd->msdu_end.info0 & | |
1026 | __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); | |
1027 | is_amsdu = !(is_first && is_last); | |
5e3dd157 | 1028 | |
581c25f8 | 1029 | rfc1042 = hdr; |
5e3dd157 | 1030 | |
581c25f8 MK |
1031 | if (is_first) { |
1032 | hdr_len = ieee80211_hdrlen(hdr->frame_control); | |
1033 | crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); | |
652de35e | 1034 | |
581c25f8 MK |
1035 | rfc1042 += round_up(hdr_len, 4) + |
1036 | round_up(crypto_len, 4); | |
f6dc2095 | 1037 | } |
5e3dd157 | 1038 | |
581c25f8 MK |
1039 | if (is_amsdu) |
1040 | rfc1042 += sizeof(struct amsdu_subframe_hdr); | |
1041 | ||
1042 | return rfc1042; | |
5e3dd157 KV |
1043 | } |
1044 | ||
581c25f8 MK |
1045 | static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, |
1046 | struct sk_buff *msdu, | |
1047 | struct ieee80211_rx_status *status, | |
1048 | const u8 first_hdr[64], | |
1049 | enum htt_rx_mpdu_encrypt_type enctype) | |
5e3dd157 | 1050 | { |
5e3dd157 | 1051 | struct ieee80211_hdr *hdr; |
581c25f8 MK |
1052 | struct ethhdr *eth; |
1053 | size_t hdr_len; | |
e3fbf8d2 | 1054 | void *rfc1042; |
581c25f8 MK |
1055 | u8 da[ETH_ALEN]; |
1056 | u8 sa[ETH_ALEN]; | |
5e3dd157 | 1057 | |
581c25f8 MK |
1058 | /* Delivered decapped frame: |
1059 | * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc | |
1060 | * [payload] | |
1061 | */ | |
1062 | ||
1063 | rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype); | |
1064 | if (WARN_ON_ONCE(!rfc1042)) | |
1065 | return; | |
1066 | ||
1067 | /* pull decapped header and copy SA & DA */ | |
1068 | eth = (struct ethhdr *)msdu->data; | |
1069 | ether_addr_copy(da, eth->h_dest); | |
1070 | ether_addr_copy(sa, eth->h_source); | |
1071 | skb_pull(msdu, sizeof(struct ethhdr)); | |
1072 | ||
1073 | /* push rfc1042/llc/snap */ | |
1074 | memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042, | |
1075 | sizeof(struct rfc1042_hdr)); | |
1076 | ||
1077 | /* push original 802.11 header */ | |
1078 | hdr = (struct ieee80211_hdr *)first_hdr; | |
1079 | hdr_len = ieee80211_hdrlen(hdr->frame_control); | |
1080 | memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); | |
1081 | ||
1082 | /* original 802.11 header has a different DA and in | |
1083 | * case of 4addr it may also have different SA | |
1084 | */ | |
1085 | hdr = (struct ieee80211_hdr *)msdu->data; | |
1086 | ether_addr_copy(ieee80211_get_DA(hdr), da); | |
1087 | ether_addr_copy(ieee80211_get_SA(hdr), sa); | |
1088 | } | |
1089 | ||
1090 | static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, | |
1091 | struct sk_buff *msdu, | |
1092 | struct ieee80211_rx_status *status, | |
1093 | const u8 first_hdr[64]) | |
1094 | { | |
1095 | struct ieee80211_hdr *hdr; | |
1096 | size_t hdr_len; | |
1097 | ||
1098 | /* Delivered decapped frame: | |
1099 | * [amsdu header] <-- replaced with 802.11 hdr | |
1100 | * [rfc1042/llc] | |
1101 | * [payload] | |
1102 | */ | |
1103 | ||
1104 | skb_pull(msdu, sizeof(struct amsdu_subframe_hdr)); | |
1105 | ||
1106 | hdr = (struct ieee80211_hdr *)first_hdr; | |
e3fbf8d2 | 1107 | hdr_len = ieee80211_hdrlen(hdr->frame_control); |
581c25f8 MK |
1108 | memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); |
1109 | } | |
5e3dd157 | 1110 | |
581c25f8 MK |
1111 | static void ath10k_htt_rx_h_undecap(struct ath10k *ar, |
1112 | struct sk_buff *msdu, | |
1113 | struct ieee80211_rx_status *status, | |
1114 | u8 first_hdr[64], | |
1115 | enum htt_rx_mpdu_encrypt_type enctype, | |
1116 | bool is_decrypted) | |
1117 | { | |
1118 | struct htt_rx_desc *rxd; | |
1119 | enum rx_msdu_decap_format decap; | |
1120 | struct ieee80211_hdr *hdr; | |
f6dc2095 | 1121 | |
581c25f8 MK |
1122 | /* First msdu's decapped header: |
1123 | * [802.11 header] <-- padded to 4 bytes long | |
1124 | * [crypto param] <-- padded to 4 bytes long | |
1125 | * [amsdu header] <-- only if A-MSDU | |
1126 | * [rfc1042/llc] | |
1127 | * | |
1128 | * Other (2nd, 3rd, ..) msdu's decapped header: | |
1129 | * [amsdu header] <-- only if A-MSDU | |
1130 | * [rfc1042/llc] | |
1131 | */ | |
1132 | ||
1133 | rxd = (void *)msdu->data - sizeof(*rxd); | |
1134 | hdr = (void *)rxd->rx_hdr_status; | |
1135 | decap = MS(__le32_to_cpu(rxd->msdu_start.info1), | |
1136 | RX_MSDU_START_INFO1_DECAP_FORMAT); | |
1137 | ||
1138 | switch (decap) { | |
5e3dd157 | 1139 | case RX_MSDU_DECAP_RAW: |
581c25f8 MK |
1140 | ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, |
1141 | is_decrypted); | |
5e3dd157 KV |
1142 | break; |
1143 | case RX_MSDU_DECAP_NATIVE_WIFI: | |
581c25f8 | 1144 | ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr); |
5e3dd157 KV |
1145 | break; |
1146 | case RX_MSDU_DECAP_ETHERNET2_DIX: | |
581c25f8 | 1147 | ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); |
e3fbf8d2 MK |
1148 | break; |
1149 | case RX_MSDU_DECAP_8023_SNAP_LLC: | |
581c25f8 | 1150 | ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr); |
e3fbf8d2 | 1151 | break; |
5e3dd157 | 1152 | } |
5e3dd157 KV |
1153 | } |
1154 | ||
605f81aa MK |
1155 | static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) |
1156 | { | |
1157 | struct htt_rx_desc *rxd; | |
1158 | u32 flags, info; | |
1159 | bool is_ip4, is_ip6; | |
1160 | bool is_tcp, is_udp; | |
1161 | bool ip_csum_ok, tcpudp_csum_ok; | |
1162 | ||
1163 | rxd = (void *)skb->data - sizeof(*rxd); | |
1164 | flags = __le32_to_cpu(rxd->attention.flags); | |
1165 | info = __le32_to_cpu(rxd->msdu_start.info1); | |
1166 | ||
1167 | is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO); | |
1168 | is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO); | |
1169 | is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO); | |
1170 | is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO); | |
1171 | ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL); | |
1172 | tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL); | |
1173 | ||
1174 | if (!is_ip4 && !is_ip6) | |
1175 | return CHECKSUM_NONE; | |
1176 | if (!is_tcp && !is_udp) | |
1177 | return CHECKSUM_NONE; | |
1178 | if (!ip_csum_ok) | |
1179 | return CHECKSUM_NONE; | |
1180 | if (!tcpudp_csum_ok) | |
1181 | return CHECKSUM_NONE; | |
1182 | ||
1183 | return CHECKSUM_UNNECESSARY; | |
1184 | } | |
1185 | ||
581c25f8 MK |
1186 | static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu) |
1187 | { | |
1188 | msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu); | |
1189 | } | |
1190 | ||
1191 | static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, | |
1192 | struct sk_buff_head *amsdu, | |
1193 | struct ieee80211_rx_status *status) | |
1194 | { | |
1195 | struct sk_buff *first; | |
1196 | struct sk_buff *last; | |
1197 | struct sk_buff *msdu; | |
1198 | struct htt_rx_desc *rxd; | |
1199 | struct ieee80211_hdr *hdr; | |
1200 | enum htt_rx_mpdu_encrypt_type enctype; | |
1201 | u8 first_hdr[64]; | |
1202 | u8 *qos; | |
1203 | size_t hdr_len; | |
1204 | bool has_fcs_err; | |
1205 | bool has_crypto_err; | |
1206 | bool has_tkip_err; | |
1207 | bool has_peer_idx_invalid; | |
1208 | bool is_decrypted; | |
1209 | u32 attention; | |
1210 | ||
1211 | if (skb_queue_empty(amsdu)) | |
1212 | return; | |
1213 | ||
1214 | first = skb_peek(amsdu); | |
1215 | rxd = (void *)first->data - sizeof(*rxd); | |
1216 | ||
1217 | enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), | |
1218 | RX_MPDU_START_INFO0_ENCRYPT_TYPE); | |
1219 | ||
1220 | /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11 | |
1221 | * decapped header. It'll be used for undecapping of each MSDU. | |
1222 | */ | |
1223 | hdr = (void *)rxd->rx_hdr_status; | |
1224 | hdr_len = ieee80211_hdrlen(hdr->frame_control); | |
1225 | memcpy(first_hdr, hdr, hdr_len); | |
1226 | ||
1227 | /* Each A-MSDU subframe will use the original header as the base and be | |
1228 | * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. | |
1229 | */ | |
1230 | hdr = (void *)first_hdr; | |
1231 | qos = ieee80211_get_qos_ctl(hdr); | |
1232 | qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; | |
1233 | ||
1234 | /* Some attention flags are valid only in the last MSDU. */ | |
1235 | last = skb_peek_tail(amsdu); | |
1236 | rxd = (void *)last->data - sizeof(*rxd); | |
1237 | attention = __le32_to_cpu(rxd->attention.flags); | |
1238 | ||
1239 | has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR); | |
1240 | has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); | |
1241 | has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); | |
1242 | has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID); | |
1243 | ||
1244 | /* Note: If hardware captures an encrypted frame that it can't decrypt, | |
1245 | * e.g. due to fcs error, missing peer or invalid key data it will | |
1246 | * report the frame as raw. | |
1247 | */ | |
1248 | is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE && | |
1249 | !has_fcs_err && | |
1250 | !has_crypto_err && | |
1251 | !has_peer_idx_invalid); | |
1252 | ||
1253 | /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ | |
1254 | status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | | |
1255 | RX_FLAG_MMIC_ERROR | | |
1256 | RX_FLAG_DECRYPTED | | |
1257 | RX_FLAG_IV_STRIPPED | | |
1258 | RX_FLAG_MMIC_STRIPPED); | |
1259 | ||
1260 | if (has_fcs_err) | |
1261 | status->flag |= RX_FLAG_FAILED_FCS_CRC; | |
1262 | ||
1263 | if (has_tkip_err) | |
1264 | status->flag |= RX_FLAG_MMIC_ERROR; | |
1265 | ||
1266 | if (is_decrypted) | |
1267 | status->flag |= RX_FLAG_DECRYPTED | | |
1268 | RX_FLAG_IV_STRIPPED | | |
1269 | RX_FLAG_MMIC_STRIPPED; | |
1270 | ||
1271 | skb_queue_walk(amsdu, msdu) { | |
1272 | ath10k_htt_rx_h_csum_offload(msdu); | |
1273 | ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, | |
1274 | is_decrypted); | |
1275 | ||
1276 | /* Undecapping involves copying the original 802.11 header back | |
1277 | * to sk_buff. If frame is protected and hardware has decrypted | |
1278 | * it then remove the protected bit. | |
1279 | */ | |
1280 | if (!is_decrypted) | |
1281 | continue; | |
1282 | ||
1283 | hdr = (void *)msdu->data; | |
1284 | hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); | |
1285 | } | |
1286 | } | |
1287 | ||
1288 | static void ath10k_htt_rx_h_deliver(struct ath10k *ar, | |
1289 | struct sk_buff_head *amsdu, | |
1290 | struct ieee80211_rx_status *status) | |
1291 | { | |
1292 | struct sk_buff *msdu; | |
1293 | ||
1294 | while ((msdu = __skb_dequeue(amsdu))) { | |
1295 | /* Setup per-MSDU flags */ | |
1296 | if (skb_queue_empty(amsdu)) | |
1297 | status->flag &= ~RX_FLAG_AMSDU_MORE; | |
1298 | else | |
1299 | status->flag |= RX_FLAG_AMSDU_MORE; | |
1300 | ||
1301 | ath10k_process_rx(ar, status, msdu); | |
1302 | } | |
1303 | } | |
1304 | ||
9aa505d2 | 1305 | static int ath10k_unchain_msdu(struct sk_buff_head *amsdu) |
bfa35368 | 1306 | { |
9aa505d2 | 1307 | struct sk_buff *skb, *first; |
bfa35368 BG |
1308 | int space; |
1309 | int total_len = 0; | |
1310 | ||
1311 | /* TODO: Might could optimize this by using | |
1312 | * skb_try_coalesce or similar method to | |
1313 | * decrease copying, or maybe get mac80211 to | |
1314 | * provide a way to just receive a list of | |
1315 | * skb? | |
1316 | */ | |
1317 | ||
9aa505d2 | 1318 | first = __skb_dequeue(amsdu); |
bfa35368 BG |
1319 | |
1320 | /* Allocate total length all at once. */ | |
9aa505d2 MK |
1321 | skb_queue_walk(amsdu, skb) |
1322 | total_len += skb->len; | |
bfa35368 | 1323 | |
9aa505d2 | 1324 | space = total_len - skb_tailroom(first); |
bfa35368 | 1325 | if ((space > 0) && |
9aa505d2 | 1326 | (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { |
bfa35368 BG |
1327 | /* TODO: bump some rx-oom error stat */ |
1328 | /* put it back together so we can free the | |
1329 | * whole list at once. | |
1330 | */ | |
9aa505d2 | 1331 | __skb_queue_head(amsdu, first); |
bfa35368 BG |
1332 | return -1; |
1333 | } | |
1334 | ||
1335 | /* Walk list again, copying contents into | |
1336 | * msdu_head | |
1337 | */ | |
9aa505d2 MK |
1338 | while ((skb = __skb_dequeue(amsdu))) { |
1339 | skb_copy_from_linear_data(skb, skb_put(first, skb->len), | |
1340 | skb->len); | |
1341 | dev_kfree_skb_any(skb); | |
bfa35368 BG |
1342 | } |
1343 | ||
9aa505d2 | 1344 | __skb_queue_head(amsdu, first); |
bfa35368 BG |
1345 | return 0; |
1346 | } | |
1347 | ||
581c25f8 MK |
1348 | static void ath10k_htt_rx_h_unchain(struct ath10k *ar, |
1349 | struct sk_buff_head *amsdu, | |
1350 | bool chained) | |
2acc4eb2 | 1351 | { |
581c25f8 MK |
1352 | struct sk_buff *first; |
1353 | struct htt_rx_desc *rxd; | |
1354 | enum rx_msdu_decap_format decap; | |
7aa7a72a | 1355 | |
581c25f8 MK |
1356 | first = skb_peek(amsdu); |
1357 | rxd = (void *)first->data - sizeof(*rxd); | |
1358 | decap = MS(__le32_to_cpu(rxd->msdu_start.info1), | |
1359 | RX_MSDU_START_INFO1_DECAP_FORMAT); | |
2acc4eb2 | 1360 | |
581c25f8 MK |
1361 | if (!chained) |
1362 | return; | |
1363 | ||
1364 | /* FIXME: Current unchaining logic can only handle simple case of raw | |
1365 | * msdu chaining. If decapping is other than raw the chaining may be | |
1366 | * more complex and this isn't handled by the current code. Don't even | |
1367 | * try re-constructing such frames - it'll be pretty much garbage. | |
1368 | */ | |
1369 | if (decap != RX_MSDU_DECAP_RAW || | |
1370 | skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) { | |
1371 | __skb_queue_purge(amsdu); | |
1372 | return; | |
2acc4eb2 JD |
1373 | } |
1374 | ||
581c25f8 MK |
1375 | ath10k_unchain_msdu(amsdu); |
1376 | } | |
1377 | ||
1378 | static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, | |
1379 | struct sk_buff_head *amsdu, | |
1380 | struct ieee80211_rx_status *rx_status) | |
1381 | { | |
1382 | struct sk_buff *msdu; | |
1383 | struct htt_rx_desc *rxd; | |
d67d0a02 MK |
1384 | bool is_mgmt; |
1385 | bool has_fcs_err; | |
581c25f8 MK |
1386 | |
1387 | msdu = skb_peek(amsdu); | |
1388 | rxd = (void *)msdu->data - sizeof(*rxd); | |
1389 | ||
1390 | /* FIXME: It might be a good idea to do some fuzzy-testing to drop | |
1391 | * invalid/dangerous frames. | |
1392 | */ | |
1393 | ||
1394 | if (!rx_status->freq) { | |
1395 | ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n"); | |
36653f05 JD |
1396 | return false; |
1397 | } | |
1398 | ||
d67d0a02 MK |
1399 | is_mgmt = !!(rxd->attention.flags & |
1400 | __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); | |
1401 | has_fcs_err = !!(rxd->attention.flags & | |
1402 | __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR)); | |
1403 | ||
581c25f8 MK |
1404 | /* Management frames are handled via WMI events. The pros of such |
1405 | * approach is that channel is explicitly provided in WMI events | |
1406 | * whereas HTT doesn't provide channel information for Rxed frames. | |
d67d0a02 MK |
1407 | * |
1408 | * However some firmware revisions don't report corrupted frames via | |
1409 | * WMI so don't drop them. | |
581c25f8 | 1410 | */ |
d67d0a02 | 1411 | if (is_mgmt && !has_fcs_err) { |
7aa7a72a | 1412 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); |
2acc4eb2 JD |
1413 | return false; |
1414 | } | |
1415 | ||
581c25f8 MK |
1416 | if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { |
1417 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); | |
2acc4eb2 JD |
1418 | return false; |
1419 | } | |
1420 | ||
1421 | return true; | |
1422 | } | |
1423 | ||
581c25f8 MK |
1424 | static void ath10k_htt_rx_h_filter(struct ath10k *ar, |
1425 | struct sk_buff_head *amsdu, | |
1426 | struct ieee80211_rx_status *rx_status) | |
1427 | { | |
1428 | if (skb_queue_empty(amsdu)) | |
1429 | return; | |
1430 | ||
1431 | if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) | |
1432 | return; | |
1433 | ||
1434 | __skb_queue_purge(amsdu); | |
1435 | } | |
1436 | ||
5e3dd157 KV |
1437 | static void ath10k_htt_rx_handler(struct ath10k_htt *htt, |
1438 | struct htt_rx_indication *rx) | |
1439 | { | |
7aa7a72a | 1440 | struct ath10k *ar = htt->ar; |
6df92a3d | 1441 | struct ieee80211_rx_status *rx_status = &htt->rx_status; |
5e3dd157 | 1442 | struct htt_rx_indication_mpdu_range *mpdu_ranges; |
9aa505d2 | 1443 | struct sk_buff_head amsdu; |
5e3dd157 KV |
1444 | int num_mpdu_ranges; |
1445 | int fw_desc_len; | |
1446 | u8 *fw_desc; | |
d540690d | 1447 | int i, ret, mpdu_count = 0; |
5e3dd157 | 1448 | |
45967089 MK |
1449 | lockdep_assert_held(&htt->rx_ring.lock); |
1450 | ||
e0bd7513 MK |
1451 | if (htt->rx_confused) |
1452 | return; | |
1453 | ||
5e3dd157 KV |
1454 | fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); |
1455 | fw_desc = (u8 *)&rx->fw_desc; | |
1456 | ||
1457 | num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), | |
1458 | HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); | |
1459 | mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); | |
1460 | ||
7aa7a72a | 1461 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", |
5e3dd157 KV |
1462 | rx, sizeof(*rx) + |
1463 | (sizeof(struct htt_rx_indication_mpdu_range) * | |
1464 | num_mpdu_ranges)); | |
1465 | ||
d540690d MK |
1466 | for (i = 0; i < num_mpdu_ranges; i++) |
1467 | mpdu_count += mpdu_ranges[i].mpdu_count; | |
1468 | ||
1469 | while (mpdu_count--) { | |
d540690d MK |
1470 | __skb_queue_head_init(&amsdu); |
1471 | ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, | |
f0e2770f | 1472 | &fw_desc_len, &amsdu); |
d540690d | 1473 | if (ret < 0) { |
e0bd7513 | 1474 | ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); |
d540690d | 1475 | __skb_queue_purge(&amsdu); |
e0bd7513 MK |
1476 | /* FIXME: It's probably a good idea to reboot the |
1477 | * device instead of leaving it inoperable. | |
1478 | */ | |
1479 | htt->rx_confused = true; | |
1480 | break; | |
d540690d | 1481 | } |
5e3dd157 | 1482 | |
b9fd8a84 | 1483 | ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); |
581c25f8 MK |
1484 | ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); |
1485 | ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); | |
1486 | ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); | |
1487 | ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); | |
5e3dd157 KV |
1488 | } |
1489 | ||
6e712d42 | 1490 | tasklet_schedule(&htt->rx_replenish_task); |
5e3dd157 KV |
1491 | } |
1492 | ||
1493 | static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, | |
5b07e07f | 1494 | struct htt_rx_fragment_indication *frag) |
5e3dd157 | 1495 | { |
7aa7a72a | 1496 | struct ath10k *ar = htt->ar; |
6df92a3d | 1497 | struct ieee80211_rx_status *rx_status = &htt->rx_status; |
9aa505d2 | 1498 | struct sk_buff_head amsdu; |
d84dd60f | 1499 | int ret; |
5e3dd157 | 1500 | u8 *fw_desc; |
581c25f8 | 1501 | int fw_desc_len; |
5e3dd157 KV |
1502 | |
1503 | fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes); | |
1504 | fw_desc = (u8 *)frag->fw_msdu_rx_desc; | |
1505 | ||
9aa505d2 | 1506 | __skb_queue_head_init(&amsdu); |
45967089 MK |
1507 | |
1508 | spin_lock_bh(&htt->rx_ring.lock); | |
d84dd60f | 1509 | ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, |
f0e2770f | 1510 | &amsdu); |
45967089 | 1511 | spin_unlock_bh(&htt->rx_ring.lock); |
5e3dd157 | 1512 | |
686687c9 MK |
1513 | tasklet_schedule(&htt->rx_replenish_task); |
1514 | ||
7aa7a72a | 1515 | ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); |
5e3dd157 | 1516 | |
d84dd60f | 1517 | if (ret) { |
7aa7a72a | 1518 | ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
d84dd60f | 1519 | ret); |
9aa505d2 | 1520 | __skb_queue_purge(&amsdu); |
5e3dd157 KV |
1521 | return; |
1522 | } | |
1523 | ||
9aa505d2 MK |
1524 | if (skb_queue_len(&amsdu) != 1) { |
1525 | ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n"); | |
1526 | __skb_queue_purge(&amsdu); | |
1527 | return; | |
1528 | } | |
1529 | ||
89a5a317 | 1530 | ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); |
581c25f8 MK |
1531 | ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); |
1532 | ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); | |
1533 | ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); | |
5e3dd157 | 1534 | |
5e3dd157 | 1535 | if (fw_desc_len > 0) { |
7aa7a72a | 1536 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
5e3dd157 KV |
1537 | "expecting more fragmented rx in one indication %d\n", |
1538 | fw_desc_len); | |
1539 | } | |
1540 | } | |
1541 | ||
6c5151a9 MK |
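/* Descriptive comment (added for clarity): translate an HTT data tx
 * completion status into htt_tx_done and release every reported MSDU via
 * ath10k_txrx_tx_unref(). Caller must hold tx_lock.
 */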
1542 | static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, |
1543 | struct sk_buff *skb) | |
1544 | { | |
1545 | struct ath10k_htt *htt = &ar->htt; | |
1546 | struct htt_resp *resp = (struct htt_resp *)skb->data; | |
1547 | struct htt_tx_done tx_done = {}; | |
1548 | int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); | |
1549 | __le16 msdu_id; | |
1550 | int i; | |
1551 | ||
45967089 MK |
1552 | lockdep_assert_held(&htt->tx_lock); |
1553 | ||
6c5151a9 MK |
1554 | switch (status) { |
1555 | case HTT_DATA_TX_STATUS_NO_ACK: | |
1556 | tx_done.no_ack = true; | |
1557 | break; | |
1558 | case HTT_DATA_TX_STATUS_OK: | |
1559 | break; | |
1560 | case HTT_DATA_TX_STATUS_DISCARD: | |
1561 | case HTT_DATA_TX_STATUS_POSTPONE: | |
1562 | case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: | |
1563 | tx_done.discard = true; | |
1564 | break; | |
1565 | default: | |
7aa7a72a | 1566 | ath10k_warn(ar, "unhandled tx completion status %d\n", status); |
6c5151a9 MK |
1567 | tx_done.discard = true; |
1568 | break; | |
1569 | } | |
1570 | ||
7aa7a72a | 1571 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", |
6c5151a9 MK |
1572 | resp->data_tx_completion.num_msdus); |
1573 | ||
1574 | for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { | |
1575 | msdu_id = resp->data_tx_completion.msdus[i]; | |
1576 | tx_done.msdu_id = __le16_to_cpu(msdu_id); | |
1577 | ath10k_txrx_tx_unref(htt, &tx_done); | |
1578 | } | |
1579 | } | |
1580 | ||
aa5b4fbc MK |
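/* Descriptive comment (added for clarity): parse an rx ADDBA event and ask
 * mac80211 to start an offloaded rx BA session for the reported peer/TID.
 */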
1581 | static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) |
1582 | { | |
1583 | struct htt_rx_addba *ev = &resp->rx_addba; | |
1584 | struct ath10k_peer *peer; | |
1585 | struct ath10k_vif *arvif; | |
1586 | u16 info0, tid, peer_id; | |
1587 | ||
1588 | info0 = __le16_to_cpu(ev->info0); | |
1589 | tid = MS(info0, HTT_RX_BA_INFO0_TID); | |
1590 | peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); | |
1591 | ||
7aa7a72a | 1592 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
1593 | "htt rx addba tid %hu peer_id %hu size %hhu\n", |
1594 | tid, peer_id, ev->window_size); | |
1595 | ||
1596 | spin_lock_bh(&ar->data_lock); | |
1597 | peer = ath10k_peer_find_by_id(ar, peer_id); | |
1598 | if (!peer) { | |
7aa7a72a | 1599 | ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", |
aa5b4fbc MK |
1600 | peer_id); |
1601 | spin_unlock_bh(&ar->data_lock); | |
1602 | return; | |
1603 | } | |
1604 | ||
1605 | arvif = ath10k_get_arvif(ar, peer->vdev_id); | |
1606 | if (!arvif) { | |
7aa7a72a | 1607 | ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", |
aa5b4fbc MK |
1608 | peer->vdev_id); |
1609 | spin_unlock_bh(&ar->data_lock); | |
1610 | return; | |
1611 | } | |
1612 | ||
7aa7a72a | 1613 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
1614 | "htt rx start rx ba session sta %pM tid %hu size %hhu\n", |
1615 | peer->addr, tid, ev->window_size); | |
1616 | ||
1617 | ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid); | |
1618 | spin_unlock_bh(&ar->data_lock); | |
1619 | } | |
1620 | ||
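/* Descriptive comment (added for clarity): parse an rx DELBA event and ask
 * mac80211 to stop the offloaded rx BA session for the reported peer/TID.
 */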
1621 | static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) | |
1622 | { | |
1623 | struct htt_rx_delba *ev = &resp->rx_delba; | |
1624 | struct ath10k_peer *peer; | |
1625 | struct ath10k_vif *arvif; | |
1626 | u16 info0, tid, peer_id; | |
1627 | ||
1628 | info0 = __le16_to_cpu(ev->info0); | |
1629 | tid = MS(info0, HTT_RX_BA_INFO0_TID); | |
1630 | peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); | |
1631 | ||
7aa7a72a | 1632 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
1633 | "htt rx delba tid %hu peer_id %hu\n", |
1634 | tid, peer_id); | |
1635 | ||
1636 | spin_lock_bh(&ar->data_lock); | |
1637 | peer = ath10k_peer_find_by_id(ar, peer_id); | |
1638 | if (!peer) { | |
7aa7a72a | 1639 | ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
aa5b4fbc MK |
1640 | peer_id); |
1641 | spin_unlock_bh(&ar->data_lock); | |
1642 | return; | |
1643 | } | |
1644 | ||
1645 | arvif = ath10k_get_arvif(ar, peer->vdev_id); | |
1646 | if (!arvif) { | |
7aa7a72a | 1647 | ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
aa5b4fbc MK |
1648 | peer->vdev_id); |
1649 | spin_unlock_bh(&ar->data_lock); | |
1650 | return; | |
1651 | } | |
1652 | ||
7aa7a72a | 1653 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
1654 | "htt rx stop rx ba session sta %pM tid %hu\n", |
1655 | peer->addr, tid); | |
1656 | ||
1657 | ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid); | |
1658 | spin_unlock_bh(&ar->data_lock); | |
1659 | } | |
1660 | ||
5e3dd157 KV |
1661 | void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) |
1662 | { | |
edb8236d | 1663 | struct ath10k_htt *htt = &ar->htt; |
5e3dd157 KV |
1664 | struct htt_resp *resp = (struct htt_resp *)skb->data; |
1665 | ||
1666 | /* confirm alignment */ | |
1667 | if (!IS_ALIGNED((unsigned long)skb->data, 4)) | |
7aa7a72a | 1668 | ath10k_warn(ar, "unaligned htt message, expect trouble\n"); |
5e3dd157 | 1669 | |
7aa7a72a | 1670 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", |
5e3dd157 KV |
1671 | resp->hdr.msg_type); |
1672 | switch (resp->hdr.msg_type) { | |
1673 | case HTT_T2H_MSG_TYPE_VERSION_CONF: { | |
1674 | htt->target_version_major = resp->ver_resp.major; | |
1675 | htt->target_version_minor = resp->ver_resp.minor; | |
1676 | complete(&htt->target_version_received); | |
1677 | break; | |
1678 | } | |
6c5151a9 | 1679 | case HTT_T2H_MSG_TYPE_RX_IND: |
45967089 MK |
1680 | spin_lock_bh(&htt->rx_ring.lock); |
1681 | __skb_queue_tail(&htt->rx_compl_q, skb); | |
1682 | spin_unlock_bh(&htt->rx_ring.lock); | |
6c5151a9 MK |
1683 | tasklet_schedule(&htt->txrx_compl_task); |
1684 | return; | |
5e3dd157 KV |
1685 | case HTT_T2H_MSG_TYPE_PEER_MAP: { |
1686 | struct htt_peer_map_event ev = { | |
1687 | .vdev_id = resp->peer_map.vdev_id, | |
1688 | .peer_id = __le16_to_cpu(resp->peer_map.peer_id), | |
1689 | }; | |
1690 | memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr)); | |
1691 | ath10k_peer_map_event(htt, &ev); | |
1692 | break; | |
1693 | } | |
1694 | case HTT_T2H_MSG_TYPE_PEER_UNMAP: { | |
1695 | struct htt_peer_unmap_event ev = { | |
1696 | .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id), | |
1697 | }; | |
1698 | ath10k_peer_unmap_event(htt, &ev); | |
1699 | break; | |
1700 | } | |
1701 | case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { | |
1702 | struct htt_tx_done tx_done = {}; | |
1703 | int status = __le32_to_cpu(resp->mgmt_tx_completion.status); | |
1704 | ||
1705 | tx_done.msdu_id = | |
1706 | __le32_to_cpu(resp->mgmt_tx_completion.desc_id); | |
1707 | ||
1708 | switch (status) { | |
1709 | case HTT_MGMT_TX_STATUS_OK: | |
1710 | break; | |
1711 | case HTT_MGMT_TX_STATUS_RETRY: | |
1712 | tx_done.no_ack = true; | |
1713 | break; | |
1714 | case HTT_MGMT_TX_STATUS_DROP: | |
1715 | tx_done.discard = true; | |
1716 | break; | |
1717 | } | |
1718 | ||
6c5151a9 | 1719 | spin_lock_bh(&htt->tx_lock); |
0a89f8a0 | 1720 | ath10k_txrx_tx_unref(htt, &tx_done); |
6c5151a9 | 1721 | spin_unlock_bh(&htt->tx_lock); |
5e3dd157 KV |
1722 | break; |
1723 | } | |
6c5151a9 MK |
1724 | case HTT_T2H_MSG_TYPE_TX_COMPL_IND: |
1725 | spin_lock_bh(&htt->tx_lock); | |
1726 | __skb_queue_tail(&htt->tx_compl_q, skb); | |
1727 | spin_unlock_bh(&htt->tx_lock); | |
1728 | tasklet_schedule(&htt->txrx_compl_task); | |
1729 | return; | |
5e3dd157 KV |
1730 | case HTT_T2H_MSG_TYPE_SEC_IND: { |
1731 | struct ath10k *ar = htt->ar; | |
1732 | struct htt_security_indication *ev = &resp->security_indication; | |
1733 | ||
7aa7a72a | 1734 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
5e3dd157 KV |
1735 | "sec ind peer_id %d unicast %d type %d\n", |
1736 | __le16_to_cpu(ev->peer_id), | |
1737 | !!(ev->flags & HTT_SECURITY_IS_UNICAST), | |
1738 | MS(ev->flags, HTT_SECURITY_TYPE)); | |
1739 | complete(&ar->install_key_done); | |
1740 | break; | |
1741 | } | |
1742 | case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { | |
7aa7a72a | 1743 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", |
5e3dd157 KV |
1744 | skb->data, skb->len); |
1745 | ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind); | |
1746 | break; | |
1747 | } | |
1748 | case HTT_T2H_MSG_TYPE_TEST: | |
1749 | /* FIX THIS */ | |
1750 | break; | |
5e3dd157 | 1751 | case HTT_T2H_MSG_TYPE_STATS_CONF: |
d35a6c18 | 1752 | trace_ath10k_htt_stats(ar, skb->data, skb->len); |
a9bf0506 KV |
1753 | break; |
1754 | case HTT_T2H_MSG_TYPE_TX_INSPECT_IND: | |
708b9bde MK |
1755 | /* Firmware can return tx frames if it's unable to fully |
1756 | * process them and suspects host may be able to fix it. ath10k | |
1757 | * sends all tx frames as already inspected so this shouldn't | |
1758 | * happen unless fw has a bug. | |
1759 | */ | |
7aa7a72a | 1760 | ath10k_warn(ar, "received an unexpected htt tx inspect event\n"); |
708b9bde | 1761 | break; |
5e3dd157 | 1762 | case HTT_T2H_MSG_TYPE_RX_ADDBA: |
aa5b4fbc MK |
1763 | ath10k_htt_rx_addba(ar, resp); |
1764 | break; | |
5e3dd157 | 1765 | case HTT_T2H_MSG_TYPE_RX_DELBA: |
aa5b4fbc MK |
1766 | ath10k_htt_rx_delba(ar, resp); |
1767 | break; | |
bfdd7937 RM |
1768 | case HTT_T2H_MSG_TYPE_PKTLOG: { |
1769 | struct ath10k_pktlog_hdr *hdr = | |
1770 | (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload; | |
1771 | ||
1772 | trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, | |
1773 | sizeof(*hdr) + | |
1774 | __le16_to_cpu(hdr->size)); | |
1775 | break; | |
1776 | } | |
aa5b4fbc MK |
1777 | case HTT_T2H_MSG_TYPE_RX_FLUSH: { |
1778 | /* Ignore this event because mac80211 takes care of Rx | |
1779 | * aggregation reordering. | |
1780 | */ | |
1781 | break; | |
1782 | } | |
5e3dd157 | 1783 | default: |
2358a544 MK |
1784 | ath10k_warn(ar, "htt event (%d) not handled\n", |
1785 | resp->hdr.msg_type); | |
7aa7a72a | 1786 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", |
5e3dd157 KV |
1787 | skb->data, skb->len); |
1788 | break; | |
1789 | } |
1790 | ||
1791 | /* Free the indication buffer */ | |
1792 | dev_kfree_skb_any(skb); | |
1793 | } | |
6c5151a9 MK |
1794 | |
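/* Descriptive comment (added for clarity): tasklet that drains the deferred
 * tx completion queue (under tx_lock) and the deferred rx indication queue
 * (under rx_ring.lock).
 */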
1795 | static void ath10k_htt_txrx_compl_task(unsigned long ptr) | |
1796 | { | |
1797 | struct ath10k_htt *htt = (struct ath10k_htt *)ptr; | |
1798 | struct htt_resp *resp; | |
1799 | struct sk_buff *skb; | |
1800 | ||
45967089 MK |
1801 | spin_lock_bh(&htt->tx_lock); |
1802 | while ((skb = __skb_dequeue(&htt->tx_compl_q))) { | |
6c5151a9 MK |
1803 | ath10k_htt_rx_frm_tx_compl(htt->ar, skb); |
1804 | dev_kfree_skb_any(skb); | |
1805 | } | |
45967089 | 1806 | spin_unlock_bh(&htt->tx_lock); |
6c5151a9 | 1807 | |
45967089 MK |
1808 | spin_lock_bh(&htt->rx_ring.lock); |
1809 | while ((skb = __skb_dequeue(&htt->rx_compl_q))) { | |
6c5151a9 MK |
1810 | resp = (struct htt_resp *)skb->data; |
1811 | ath10k_htt_rx_handler(htt, &resp->rx_ind); | |
1812 | dev_kfree_skb_any(skb); | |
1813 | } | |
45967089 | 1814 | spin_unlock_bh(&htt->rx_ring.lock); |
6c5151a9 | 1815 | } |