/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

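/*
 * For illustration, the SIZE_MAX figure follows from the comment above it:
 * buffering 20 ms at 1 Gbps means 1e9 bps * 0.020 s / 8 = 2,500,000 bytes,
 * i.e. roughly 1667 MSDUs of 1500 bytes each; the next power of two is 2048.
 */
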
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
        int size;

        /*
         * It is expected that the host CPU will typically be able to
         * service the rx indication from one A-MPDU before the rx
         * indication from the subsequent A-MPDU happens, roughly 1-2 ms
         * later. However, the rx ring should be sized very conservatively,
         * to accommodate the worst reasonable delay before the host CPU
         * services a rx indication interrupt.
         *
         * The rx ring need not be kept full of empty buffers. In theory,
         * the htt host SW can dynamically track the low-water mark in the
         * rx ring, and dynamically adjust the level to which the rx ring
         * is filled with empty buffers, to dynamically meet the desired
         * low-water mark.
         *
         * In contrast, it's difficult to resize the rx ring itself, once
         * it's in use. Thus, the ring itself should be sized very
         * conservatively, while the degree to which the ring is filled
         * with empty buffers should be sized moderately conservatively.
         */

        /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
        size =
            htt->max_throughput_mbps *
            1000 /
            (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

        if (size < HTT_RX_RING_SIZE_MIN)
                size = HTT_RX_RING_SIZE_MIN;

        if (size > HTT_RX_RING_SIZE_MAX)
                size = HTT_RX_RING_SIZE_MAX;

        size = roundup_pow_of_two(size);

        return size;
}

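/*
 * Worked example for the sizing formula above (illustrative numbers, not
 * taken from the source): with max_throughput_mbps = 800,
 * size = 800 * 1000 / (8 * 1000) * 20 = 2000, which is within
 * [128, 2048] and rounds up to a 2048-entry ring.
 */
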
static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
        int size;

        /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
        size =
            htt->max_throughput_mbps *
            1000 /
            (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

        /*
         * Make sure the fill level is at least 1 less than the ring size.
         * Leaving 1 element empty allows the SW to easily distinguish
         * between a full ring vs. an empty ring.
         */
        if (size >= htt->rx_ring.size)
                size = htt->rx_ring.size - 1;

        return size;
}

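/*
 * Why one slot must stay empty: the states "completely full" and
 * "completely empty" would otherwise present the same condition,
 * alloc index == read index. Capping the fill level at size - 1 keeps
 * that equality unambiguous: equal indices always mean "empty".
 */
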
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct ath10k_skb_cb *cb;
        int i;

        for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
                skb = htt->rx_ring.netbufs_ring[i];
                cb = ATH10K_SKB_CB(skb);
                dma_unmap_single(htt->ar->dev, cb->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }

        htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        struct htt_rx_desc *rx_desc;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0, idx;

        idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
        while (num > 0) {
                skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
                if (!skb) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
                                 skb->data);

                /* Clear rx_desc attention word before posting to Rx ring.
                 * The MSDU_DONE sanity check on the rx path relies on this
                 * word starting out zeroed. */
                rx_desc = (struct htt_rx_desc *)skb->data;
                rx_desc->attention.flags = __cpu_to_le32(0);

                paddr = dma_map_single(htt->ar->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
                        dev_kfree_skb_any(skb);
                        ret = -ENOMEM;
                        goto fail;
                }

                ATH10K_SKB_CB(skb)->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
                htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
                htt->rx_ring.fill_cnt++;

                num--;
                idx++;
                idx &= htt->rx_ring.size_mask;
        }

fail:
        /* Publish the updated alloc index even on failure so the target
         * can use the buffers that were successfully posted. */
        *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
        return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        lockdep_assert_held(&htt->rx_ring.lock);
        return __ath10k_htt_rx_ring_fill_n(htt, num);
}

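/*
 * Note on the split above: __ath10k_htt_rx_ring_fill_n() does the work
 * without taking rx_ring.lock itself. Callers that already hold the lock
 * go through ath10k_htt_rx_ring_fill_n() (which asserts it), while
 * ath10k_htt_rx_attach() calls the __ variant directly, before any
 * concurrent rx processing can exist.
 */
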
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
        int ret, num_to_fill;

        spin_lock_bh(&htt->rx_ring.lock);
        num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
                 * Failed to fill it to the desired level -
                 * we'll start a timer and try again next time.
                 * As long as enough buffers are left in the ring for
                 * another A-MPDU rx, no special recovery is needed.
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)arg;

        ath10k_htt_rx_msdu_buff_replenish(htt);
}

static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
        return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
                htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}

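/*
 * Worked example of the masked subtraction above (illustrative values):
 * with a 1024-entry ring (size_mask = 1023), alloc index 5 and software
 * read index 1020, the count is (5 - 1020) & 1023 = (-1015) & 1023 = 9,
 * i.e. the mask makes the wraparound come out right as long as the ring
 * size is a power of two.
 */
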
void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
        int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

        del_timer_sync(&htt->rx_ring.refill_retry_timer);

        while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
                struct sk_buff *skb = htt->rx_ring.netbufs_ring[sw_rd_idx];
                struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

                dma_unmap_single(htt->ar->dev, cb->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                sw_rd_idx++;
                sw_rd_idx &= htt->rx_ring.size_mask;
        }

        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(*htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);

        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);

        kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
        int idx;
        struct sk_buff *msdu;

        spin_lock_bh(&htt->rx_ring.lock);

        if (ath10k_htt_rx_ring_elems(htt) == 0)
                ath10k_warn("htt rx ring is empty!\n");

        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];

        idx++;
        idx &= htt->rx_ring.size_mask;
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;

        spin_unlock_bh(&htt->rx_ring.lock);
        return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
        struct sk_buff *next;

        while (skb) {
                next = skb->next;
                dev_kfree_skb_any(skb);
                skb = next;
        }
}

static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   u8 **fw_desc, int *fw_desc_len,
                                   struct sk_buff **head_msdu,
                                   struct sk_buff **tail_msdu)
{
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;

        if (ath10k_htt_rx_ring_elems(htt) == 0)
                ath10k_warn("htt rx ring is empty!\n");

        if (htt->rx_confused) {
                ath10k_warn("htt is confused. refusing rx\n");
                return 0;
        }

        msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
        while (msdu) {
                int last_msdu, msdu_len_invalid, msdu_chained;

                dma_unmap_single(htt->ar->dev,
                                 ATH10K_SKB_CB(msdu)->paddr,
                                 msdu->len + skb_tailroom(msdu),
                                 DMA_FROM_DEVICE);

                ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
                                msdu->data, msdu->len + skb_tailroom(msdu));

                rx_desc = (struct htt_rx_desc *)msdu->data;

                /* FIXME: we must report msdu payload since this is what caller
                 * expects now */
                skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
                skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

                /*
                 * Sanity check - confirm the HW is finished filling in the
                 * rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */
                if (!(__le32_to_cpu(rx_desc->attention.flags)
                      & RX_ATTENTION_FLAGS_MSDU_DONE)) {
                        ath10k_htt_rx_free_msdu_chain(*head_msdu);
                        *head_msdu = NULL;
                        msdu = NULL;
                        ath10k_err("htt rx stopped. cannot recover\n");
                        htt->rx_confused = true;
                        break;
                }

                /*
                 * Copy the FW rx descriptor for this MSDU from the rx
                 * indication message into the MSDU's netbuf. HL uses the
                 * same rx indication message definition as LL, and simply
                 * appends new info (fields from the HW rx desc, and the
                 * MSDU payload itself). So, the offset into the rx
                 * indication message only has to account for the standard
                 * offset of the per-MSDU FW rx desc info within the
                 * message, and how many bytes of the per-MSDU FW rx desc
                 * info have already been consumed. (And the endianness of
                 * the host, since for a big-endian host, the rx ind
                 * message contents, including the per-MSDU rx desc bytes,
                 * were byteswapped during upload.)
                 */
                if (*fw_desc_len > 0) {
                        rx_desc->fw_desc.info0 = **fw_desc;
                        /*
                         * The target is expected to only provide the basic
                         * per-MSDU rx descriptors. Just to be sure, verify
                         * that the target has not attached extension data
                         * (e.g. LRO flow ID).
                         */

                        /* or more, if there's extension data */
                        (*fw_desc)++;
                        (*fw_desc_len)--;
                } else {
                        /*
                         * When an oversized A-MSDU happens, the FW will lose
                         * some of the MSDU status - in this case, the FW
                         * descriptors provided will be fewer than the
                         * actual MSDUs inside this MPDU. Mark the FW
                         * descriptors so that they will still be delivered
                         * to the upper stack if there is no CRC error for
                         * this MPDU.
                         *
                         * FIX THIS - the FW descriptors are actually for
                         * MSDUs at the end of this A-MSDU instead of the
                         * beginning.
                         */
                        rx_desc->fw_desc.info0 = 0;
                }

                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                      & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                         RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
                msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;

                if (msdu_len_invalid)
                        msdu_len = 0;

                skb_trim(msdu, 0);
                skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
                msdu_len -= msdu->len;

                /* FIXME: Do chained buffers include htt_rx_desc or not? */
                while (msdu_chained--) {
                        struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

                        dma_unmap_single(htt->ar->dev,
                                         ATH10K_SKB_CB(next)->paddr,
                                         next->len + skb_tailroom(next),
                                         DMA_FROM_DEVICE);

                        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
                                        next->data,
                                        next->len + skb_tailroom(next));

                        skb_trim(next, 0);
                        skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
                        msdu_len -= next->len;

                        msdu->next = next;
                        msdu = next;
                        msdu_chaining = 1;
                }

                if (msdu_len > 0) {
                        /* This may suggest a FW bug. */
                        ath10k_warn("htt rx msdu len not consumed (%d)\n",
                                    msdu_len);
                }

                last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
                            RX_MSDU_END_INFO0_LAST_MSDU;

                if (last_msdu) {
                        msdu->next = NULL;
                        break;
                } else {
                        struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

                        msdu->next = next;
                        msdu = next;
                }
        }
        *tail_msdu = msdu;

        /*
         * Don't refill the ring yet.
         *
         * First, the elements popped here are still in use - it is not
         * safe to overwrite them until the matching call to
         * mpdu_desc_list_next. Second, for efficiency it is preferable to
         * refill the rx ring with 1 PPDU's worth of rx buffers (something
         * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
         * (something like 3 buffers). Consequently, we'll rely on the txrx
         * SW to tell us when it is done pulling all the PPDU's rx buffers
         * out of the rx ring, and then refill it just once.
         */

        return msdu_chaining;
}

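/*
 * Return value semantics of ath10k_htt_rx_amsdu_pop(), as used below:
 * 0 means every MSDU fit in a single rx buffer, 1 means at least one
 * MSDU spilled over into chained ("ring2") buffers. The rx indication
 * handler currently drops chained MPDUs rather than reassembling them
 * (see the msdu_chaining check in ath10k_htt_rx_handler()).
 */
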
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
        dma_addr_t paddr;
        void *vaddr;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

        htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn("htt rx ring size is not power of 2\n");
                return -EINVAL;
        }

        htt->rx_ring.size_mask = htt->rx_ring.size - 1;

        /*
         * Set the initial value for the level to which the rx ring
         * should be filled, based on the max throughput and the
         * worst likely latency for the host to fill the rx ring
         * with new buffers. In theory, this fill level can be
         * dynamically adjusted from the initial value set here, to
         * reflect the actual host latency rather than a
         * conservative assumption about the host latency.
         */
        htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

        htt->rx_ring.netbufs_ring =
                kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   (htt->rx_ring.size *
                                    sizeof(*htt->rx_ring.paddrs_ring)),
                                   &paddr, GFP_DMA);
        if (!vaddr)
                goto err_dma_ring;

        htt->rx_ring.paddrs_ring = vaddr;
        htt->rx_ring.base_paddr = paddr;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   sizeof(*htt->rx_ring.alloc_idx.vaddr),
                                   &paddr, GFP_DMA);
        if (!vaddr)
                goto err_dma_idx;

        htt->rx_ring.alloc_idx.vaddr = vaddr;
        htt->rx_ring.alloc_idx.paddr = paddr;
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        *htt->rx_ring.alloc_idx.vaddr = 0;

        /* Initialize the Rx refill retry timer */
        setup_timer(timer, ath10k_htt_rx_ring_refill_retry,
                    (unsigned long)htt);

        spin_lock_init(&htt->rx_ring.lock);

        htt->rx_ring.fill_cnt = 0;
        if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
                goto err_fill_ring;

        ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;

err_fill_ring:
        ath10k_htt_rx_ring_free(htt);
        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(*htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);
err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
        return -ENOMEM;
}

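/*
 * Memory layout recap for the rx ring set up above: netbufs_ring is a
 * host-only array of skb pointers, paddrs_ring is a DMA-coherent array
 * of buffer physical addresses that the target reads, and alloc_idx is
 * a single DMA-coherent word acting as the shared producer index - the
 * host advances it in __ath10k_htt_rx_ring_fill_n() as buffers are
 * posted, and compares it against sw_rd_idx when consuming.
 */
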
static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return 4;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
        case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return 8;
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        }

        ath10k_warn("unknown encryption type %d\n", type);
        return 0;
}

static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return 4;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return 8;
        }

        ath10k_warn("unknown encryption type %d\n", type);
        return 0;
}

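/*
 * How these two helpers map to 802.11 cipher framing (a summary of the
 * tables above, using the standard field names): param_len is the
 * per-MPDU overhead between the 802.11 header and the payload (e.g. the
 * 4-byte WEP IV, or the 8-byte TKIP IV/ExtIV and CCMP header), while
 * tail_len is the trailer left after the payload (the 4-byte TKIP ICV,
 * the 8-byte CCMP MIC). WEP's 4-byte ICV returning 0 here suggests the
 * hardware has already stripped it, though that is an inference from
 * the code rather than something stated in it.
 */
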
/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format fmt;

        rxd = (void *)skb->data - sizeof(*rxd);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                 RX_MSDU_START_INFO1_DECAP_FORMAT);

        if (fmt == RX_MSDU_DECAP_RAW)
                return (void *)skb->data;
        else
                return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
        if (ieee80211_is_data_qos(hdr->frame_control)) {
                u8 *qc = ieee80211_get_qos_ctl(hdr);

                /* bit 7 of the QoS Control field is the A-MSDU Present
                 * flag (IEEE 802.11n) */
                if (qc[0] & 0x80)
                        return true;
        }
        return false;
}

static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
                               struct htt_rx_info *info)
{
        struct htt_rx_desc *rxd;
        struct sk_buff *amsdu;
        struct sk_buff *first;
        struct ieee80211_hdr *hdr;
        struct sk_buff *skb = info->skb;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;
        unsigned int hdr_len;
        int crypto_len;

        rxd = (void *)skb->data - sizeof(*rxd);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                 RX_MSDU_START_INFO1_DECAP_FORMAT);
        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

        /* FIXME: No idea what assumptions are safe here. Need logs */
        if (fmt == RX_MSDU_DECAP_RAW && skb->next) {
                ath10k_htt_rx_free_msdu_chain(skb->next);
                skb->next = NULL;
                return -ENOTSUPP;
        }

        /* A-MSDU max is a little less than 8K */
        amsdu = dev_alloc_skb(8 * 1024);
        if (!amsdu) {
                ath10k_warn("A-MSDU allocation failed\n");
                ath10k_htt_rx_free_msdu_chain(skb->next);
                skb->next = NULL;
                return -ENOMEM;
        }

        if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
                int hdrlen;

                hdr = (void *)rxd->rx_hdr_status;
                hdrlen = ieee80211_hdrlen(hdr->frame_control);
                memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
        }

        first = skb;
        while (skb) {
                void *decap_hdr;
                int decap_len = 0;

                rxd = (void *)skb->data - sizeof(*rxd);
                fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                         RX_MSDU_START_INFO1_DECAP_FORMAT);
                decap_hdr = (void *)rxd->rx_hdr_status;

                if (skb == first) {
                        /* We receive linked A-MSDU subframe skbuffs. The
                         * first one contains the original 802.11 header (and
                         * possible crypto param) in the RX descriptor. The
                         * A-MSDU subframe header follows that. Each part is
                         * aligned to a 4 byte boundary. */

                        hdr = (void *)amsdu->data;
                        hdr_len = ieee80211_hdrlen(hdr->frame_control);
                        crypto_len = ath10k_htt_rx_crypto_param_len(enctype);

                        decap_hdr += roundup(hdr_len, 4);
                        decap_hdr += roundup(crypto_len, 4);
                }

                /* When fmt == RX_MSDU_DECAP_8023_SNAP_LLC:
                 *
                 * SNAP 802.3 consists of:
                 * [dst:6][src:6][len:2][dsap:1][ssap:1][ctl:1][snap:5]
                 * [data][fcs:4].
                 *
                 * Since this overlaps with the A-MSDU header (da, sa, len)
                 * there's nothing extra to do. */

                if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
                        /* Ethernet2 decap inserts an ethernet header in
                         * place of the A-MSDU subframe header. */
                        skb_pull(skb, 6 + 6 + 2);

                        /* A-MSDU subframe header length */
                        decap_len += 6 + 6 + 2;

                        /* Ethernet2 decap also strips the LLC/SNAP so we need
                         * to re-insert it. The LLC/SNAP follows the A-MSDU
                         * subframe header. */
                        /* FIXME: Not all LLCs are 8 bytes long */
                        decap_len += 8;

                        memcpy(skb_put(amsdu, decap_len), decap_hdr,
                               decap_len);
                }

                if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
                        /* Native Wifi decap inserts a regular 802.11 header
                         * in place of the A-MSDU subframe header. */
                        hdr = (struct ieee80211_hdr *)skb->data;
                        skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));

                        /* A-MSDU subframe header length */
                        decap_len += 6 + 6 + 2;

                        memcpy(skb_put(amsdu, decap_len), decap_hdr,
                               decap_len);
                }

                if (fmt == RX_MSDU_DECAP_RAW)
                        skb_trim(skb, skb->len - 4); /* remove FCS */

                memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);

                /* A-MSDU subframes are padded to 4-byte boundaries, but
                 * relative to the first subframe, not the whole MPDU */
                if (skb->next && ((decap_len + skb->len) & 3)) {
                        int padlen = 4 - ((decap_len + skb->len) & 3);

                        memset(skb_put(amsdu, padlen), 0, padlen);
                }

                skb = skb->next;
        }

        info->skb = amsdu;
        info->encrypt_type = enctype;

        ath10k_htt_rx_free_msdu_chain(first);

        return 0;
}

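/*
 * For reference, the on-air A-MSDU subframe layout being reconstructed
 * above (IEEE 802.11):
 *
 *   [DA:6][SA:6][length:2][LLC/SNAP:8][MSDU payload][pad:0-3]
 *
 * Padding example with the arithmetic used in the loop: a subframe where
 * decap_len + skb->len = 50 gives (50 & 3) = 2, so padlen = 4 - 2 = 2
 * zero bytes are appended before the next subframe.
 */
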
static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
        struct sk_buff *skb = info->skb;
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;

        /* This shouldn't happen. If it does then it may be a FW bug. */
        if (skb->next) {
                ath10k_warn("received chained non A-MSDU frame\n");
                ath10k_htt_rx_free_msdu_chain(skb->next);
                skb->next = NULL;
        }

        rxd = (void *)skb->data - sizeof(*rxd);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                 RX_MSDU_START_INFO1_DECAP_FORMAT);
        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
        hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;

        switch (fmt) {
        case RX_MSDU_DECAP_RAW:
                /* remove trailing FCS */
                skb_trim(skb, skb->len - 4);
                break;
        case RX_MSDU_DECAP_NATIVE_WIFI:
                /* nothing to do here */
                break;
        case RX_MSDU_DECAP_ETHERNET2_DIX:
                /* macaddr[6] + macaddr[6] + ethertype[2] */
                skb_pull(skb, 6 + 6 + 2);
                break;
        case RX_MSDU_DECAP_8023_SNAP_LLC:
                /* macaddr[6] + macaddr[6] + len[2] */
                /* we don't need this for non-A-MSDU */
                skb_pull(skb, 6 + 6 + 2);
                break;
        }

        if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
                void *llc;
                int llclen;

                llclen = 8;
                llc = hdr;
                llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
                llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

                skb_push(skb, llclen);
                memcpy(skb->data, llc, llclen);
        }

        if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
                int len = ieee80211_hdrlen(hdr->frame_control);

                skb_push(skb, len);
                memcpy(skb->data, hdr, len);
        }

        info->skb = skb;
        info->encrypt_type = enctype;
        return 0;
}

static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);

        if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
                return true;

        return false;
}

static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);

        if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
                return true;

        return false;
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags, info;
        bool is_ip4, is_ip6;
        bool is_tcp, is_udp;
        bool ip_csum_ok, tcpudp_csum_ok;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);
        info = __le32_to_cpu(rxd->msdu_start.info1);

        is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
        is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
        is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
        is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
        ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
        tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

        if (!is_ip4 && !is_ip6)
                return CHECKSUM_NONE;
        if (!is_tcp && !is_udp)
                return CHECKSUM_NONE;
        if (!ip_csum_ok)
                return CHECKSUM_NONE;
        if (!tcpudp_csum_ok)
                return CHECKSUM_NONE;

        return CHECKSUM_UNNECESSARY;
}

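/*
 * The value computed above lands in skb->ip_summed before the frame is
 * handed up: CHECKSUM_UNNECESSARY tells the network stack the hardware
 * already validated the IP and TCP/UDP checksums, so software
 * verification is skipped; CHECKSUM_NONE (non-IP frames, or any failed
 * or absent hardware check) makes the stack verify them itself.
 */
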
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                  struct htt_rx_indication *rx)
{
        struct htt_rx_info info;
        struct htt_rx_indication_mpdu_range *mpdu_ranges;
        struct ieee80211_hdr *hdr;
        int num_mpdu_ranges;
        int fw_desc_len;
        u8 *fw_desc;
        int i, j;
        int ret;
        int ip_summed;

        memset(&info, 0, sizeof(info));

        fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
        fw_desc = (u8 *)&rx->fw_desc;

        num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
        mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
                        rx, sizeof(*rx) +
                        (sizeof(struct htt_rx_indication_mpdu_range) *
                         num_mpdu_ranges));

        for (i = 0; i < num_mpdu_ranges; i++) {
                info.status = mpdu_ranges[i].mpdu_range_status;

                for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
                        struct sk_buff *msdu_head, *msdu_tail;
                        enum htt_rx_mpdu_status status;
                        int msdu_chaining;

                        msdu_head = NULL;
                        msdu_tail = NULL;
                        msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
                                                                &fw_desc,
                                                                &fw_desc_len,
                                                                &msdu_head,
                                                                &msdu_tail);

                        if (!msdu_head) {
                                ath10k_warn("htt rx no data!\n");
                                continue;
                        }

                        if (msdu_head->len == 0) {
                                ath10k_dbg(ATH10K_DBG_HTT,
                                           "htt rx dropping due to zero-len\n");
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        status = info.status;

                        /* Skip mgmt frames since we handle them in WMI */
                        if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        if (status != HTT_RX_IND_MPDU_STATUS_OK &&
                            status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
                            !htt->ar->monitor_enabled) {
                                ath10k_dbg(ATH10K_DBG_HTT,
                                           "htt rx ignoring frame w/ status %d\n",
                                           status);
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        /* FIXME: we do not support chaining yet.
                         * this needs investigation */
                        if (msdu_chaining) {
                                ath10k_warn("msdu_chaining is true\n");
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        /* The skb is not yet processed and it may be
                         * reallocated. Since the offload is in the original
                         * skb, extract the checksum now and assign it later */
                        ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);

                        info.skb = msdu_head;
                        info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
                        info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
                        info.signal += rx->ppdu.combined_rssi;

                        info.rate.info0 = rx->ppdu.info0;
                        info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
                        info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);

                        hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

                        if (ath10k_htt_rx_hdr_is_amsdu(hdr))
                                ret = ath10k_htt_rx_amsdu(htt, &info);
                        else
                                ret = ath10k_htt_rx_msdu(htt, &info);

                        if (ret && !info.fcs_err) {
                                ath10k_warn("error processing msdus %d\n", ret);
                                dev_kfree_skb_any(info.skb);
                                continue;
                        }

                        if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
                                ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");

                        info.skb->ip_summed = ip_summed;

                        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
                                        info.skb->data, info.skb->len);
                        ath10k_process_rx(htt->ar, &info);
                }
        }

        ath10k_htt_rx_msdu_buff_replenish(htt);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
                                       struct htt_rx_fragment_indication *frag)
{
        struct sk_buff *msdu_head, *msdu_tail;
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format fmt;
        struct htt_rx_info info = {};
        struct ieee80211_hdr *hdr;
        int msdu_chaining;
        bool tkip_mic_err;
        bool decrypt_err;
        u8 *fw_desc;
        int fw_desc_len, hdrlen, paramlen;
        int trim;

        fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
        fw_desc = (u8 *)frag->fw_msdu_rx_desc;

        msdu_head = NULL;
        msdu_tail = NULL;
        msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
                                                &msdu_head, &msdu_tail);

        ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

        if (!msdu_head) {
                ath10k_warn("htt rx frag no data\n");
                return;
        }

        if (msdu_chaining || msdu_head != msdu_tail) {
                ath10k_warn("aggregation with fragmentation?!\n");
                ath10k_htt_rx_free_msdu_chain(msdu_head);
                return;
        }

        /* FIXME: implement signal strength */

        hdr = (struct ieee80211_hdr *)msdu_head->data;
        rxd = (void *)msdu_head->data - sizeof(*rxd);
        tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
                          RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
        decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
                         RX_ATTENTION_FLAGS_DECRYPT_ERR);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                 RX_MSDU_START_INFO1_DECAP_FORMAT);

        if (fmt != RX_MSDU_DECAP_RAW) {
                ath10k_warn("we don't support non-raw fragmented rx yet\n");
                dev_kfree_skb_any(msdu_head);
                goto end;
        }

        info.skb = msdu_head;
        info.status = HTT_RX_IND_MPDU_STATUS_OK;
        info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                               RX_MPDU_START_INFO0_ENCRYPT_TYPE);
        info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);

        if (tkip_mic_err) {
                ath10k_warn("tkip mic error\n");
                info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
        }

        if (decrypt_err) {
                ath10k_warn("decryption err in fragmented rx\n");
                dev_kfree_skb_any(info.skb);
                goto end;
        }

        if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
                hdrlen = ieee80211_hdrlen(hdr->frame_control);
                paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

                /* It is more efficient to move the header than the payload */
                memmove((void *)info.skb->data + paramlen,
                        (void *)info.skb->data,
                        hdrlen);
                skb_pull(info.skb, paramlen);
                hdr = (struct ieee80211_hdr *)info.skb->data;
        }

        /* remove trailing FCS */
        trim = 4;

        /* remove crypto trailer */
        trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

        /* last fragment of TKIP frags has MIC */
        if (!ieee80211_has_morefrags(hdr->frame_control) &&
            info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                trim += 8;

        if (trim > info.skb->len) {
                ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
                dev_kfree_skb_any(info.skb);
                goto end;
        }

        skb_trim(info.skb, info.skb->len - trim);

        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
                        info.skb->data, info.skb->len);
        ath10k_process_rx(htt->ar, &info);

end:
        if (fw_desc_len > 0) {
                ath10k_dbg(ATH10K_DBG_HTT,
                           "expecting more fragmented rx in one indication %d\n",
                           fw_desc_len);
        }
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;

        /* confirm alignment */
        if (!IS_ALIGNED((unsigned long)skb->data, 4))
                ath10k_warn("unaligned htt message, expect trouble\n");

        ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
                   resp->hdr.msg_type);
        switch (resp->hdr.msg_type) {
        case HTT_T2H_MSG_TYPE_VERSION_CONF: {
                htt->target_version_major = resp->ver_resp.major;
                htt->target_version_minor = resp->ver_resp.minor;
                complete(&htt->target_version_received);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_IND: {
                ath10k_htt_rx_handler(htt, &resp->rx_ind);
                break;
        }
        case HTT_T2H_MSG_TYPE_PEER_MAP: {
                struct htt_peer_map_event ev = {
                        .vdev_id = resp->peer_map.vdev_id,
                        .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
                };
                memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
                ath10k_peer_map_event(htt, &ev);
                break;
        }
        case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
                struct htt_peer_unmap_event ev = {
                        .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
                };
                ath10k_peer_unmap_event(htt, &ev);
                break;
        }
        case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
                struct htt_tx_done tx_done = {};
                int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

                tx_done.msdu_id =
                        __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

                switch (status) {
                case HTT_MGMT_TX_STATUS_OK:
                        break;
                case HTT_MGMT_TX_STATUS_RETRY:
                        tx_done.no_ack = true;
                        break;
                case HTT_MGMT_TX_STATUS_DROP:
                        tx_done.discard = true;
                        break;
                }

                ath10k_txrx_tx_completed(htt, &tx_done);
                break;
        }
        case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
                struct htt_tx_done tx_done = {};
                int status = MS(resp->data_tx_completion.flags,
                                HTT_DATA_TX_STATUS);
                __le16 msdu_id;
                int i;

                switch (status) {
                case HTT_DATA_TX_STATUS_NO_ACK:
                        tx_done.no_ack = true;
                        break;
                case HTT_DATA_TX_STATUS_OK:
                        break;
                case HTT_DATA_TX_STATUS_DISCARD:
                case HTT_DATA_TX_STATUS_POSTPONE:
                case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
                        tx_done.discard = true;
                        break;
                default:
                        ath10k_warn("unhandled tx completion status %d\n",
                                    status);
                        tx_done.discard = true;
                        break;
                }

                ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
                           resp->data_tx_completion.num_msdus);

                for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
                        msdu_id = resp->data_tx_completion.msdus[i];
                        tx_done.msdu_id = __le16_to_cpu(msdu_id);
                        ath10k_txrx_tx_completed(htt, &tx_done);
                }
                break;
        }
        case HTT_T2H_MSG_TYPE_SEC_IND: {
                struct ath10k *ar = htt->ar;
                struct htt_security_indication *ev = &resp->security_indication;

                ath10k_dbg(ATH10K_DBG_HTT,
                           "sec ind peer_id %d unicast %d type %d\n",
                           __le16_to_cpu(ev->peer_id),
                           !!(ev->flags & HTT_SECURITY_IS_UNICAST),
                           MS(ev->flags, HTT_SECURITY_TYPE));
                complete(&ar->install_key_done);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
                ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
                ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
                break;
        }
        case HTT_T2H_MSG_TYPE_TEST:
                /* FIX THIS */
                break;
        case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
        case HTT_T2H_MSG_TYPE_STATS_CONF:
        case HTT_T2H_MSG_TYPE_RX_ADDBA:
        case HTT_T2H_MSG_TYPE_RX_DELBA:
        case HTT_T2H_MSG_TYPE_RX_FLUSH:
        default:
                ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
                           resp->hdr.msg_type);
                ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
                break;
        }

        /* Free the indication buffer */
        dev_kfree_skb_any(skb);
}