/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/etherdevice.h>

#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
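
/* Pending-tx accounting: each data and mgmt frame handed to the firmware
 * bumps num_pending_tx under htt->tx_lock. When the count reaches
 * max_num_pending_tx the mac80211 queues are stopped; they are woken again
 * as soon as one slot frees up.
 */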
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ieee80211_wake_queues(htt->ar->hw);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ieee80211_stop_queues(htt->ar->hw);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}
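
/* Every in-flight MSDU gets an id allocated from the used_msdu_ids bitmap.
 * The id doubles as an index into htt->pending_tx so the firmware's tx
 * completion event can be matched back to the corresponding skb.
 */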
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int msdu_id;

	lockdep_assert_held(&htt->tx_lock);

	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
				      htt->max_num_pending_tx);
	if (msdu_id == htt->max_num_pending_tx)
		return -ENOBUFS;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
	__set_bit(msdu_id, htt->used_msdu_ids);

	return msdu_id;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!test_bit(msdu_id, htt->used_msdu_ids))
		ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
			    msdu_id);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
	__clear_bit(msdu_id, htt->used_msdu_ids);
}
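
/* Sizes the pending-tx bookkeeping to the firmware's MSDU descriptor budget
 * (10.x firmware advertises a different limit than main firmware) and
 * creates the DMA pool from which per-packet HTT tx descriptors are carved.
 */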
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	spin_lock_init(&htt->tx_lock);
	init_waitqueue_head(&htt->empty_tx_wq);

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
	else
		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
				  htt->max_num_pending_tx, GFP_KERNEL);
	if (!htt->pending_tx)
		return -ENOMEM;

	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
				     BITS_TO_LONGS(htt->max_num_pending_tx),
				     GFP_KERNEL);
	if (!htt->used_msdu_ids) {
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		kfree(htt->used_msdu_ids);
		kfree(htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}
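
/* Any MSDUs still marked in-flight at teardown are completed locally with
 * discard status so their skbs and DMA mappings get released.
 */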
static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct htt_tx_done tx_done = {0};
	int msdu_id;

	spin_lock_bh(&htt->tx_lock);
	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
		if (!test_bit(msdu_id, htt->used_msdu_ids))
			continue;

		ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
			   msdu_id);

		tx_done.discard = 1;
		tx_done.msdu_id = msdu_id;

		ath10k_txrx_tx_unref(htt, &tx_done);
	}
	spin_unlock_bh(&htt->tx_lock);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_free_pending(htt);
	kfree(htt->pending_tx);
	kfree(htt->used_msdu_ids);
	dma_pool_destroy(htt->tx_pool);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
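
/* The host-to-target control messages below are each built in a freshly
 * allocated skb, prefixed with an HTT command header and handed to HTC on
 * the HTT endpoint. On send failure the skb is still owned by this code
 * and must be freed here.
 */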
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
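
/* The caller-supplied 64-bit cookie is split into two little-endian words
 * and echoed back by the firmware in its stats confirmation event, which
 * lets responses be correlated with requests.
 */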
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
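
/* Tells the firmware where the host rx ring lives, how big its buffers are
 * and, via the per-field offsets below, how struct htt_rx_desc is laid out
 * so each rx descriptor section is DMA'd to the right place.
 */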
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	/* offsets are expressed in 4-byte words */
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
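
/* Caps the number of subframes the firmware may aggregate. Zero is rejected
 * rather than interpreted as "use the firmware default" (amsdu = 3,
 * ampdu = 64).
 */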
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
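
/* Pre-3.0 firmware uses a dedicated HTT mgmt tx command: the frame stays in
 * host memory and only its DMA address, length, ids and the leading bytes
 * of the 802.11 header travel in the command. Error paths unwind strictly
 * in reverse order of setup.
 */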
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txdesc;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.txbuf = NULL;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
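
/* Data tx path. The per-packet descriptor (struct ath10k_htt_txbuf) comes
 * from the DMA pool and holds, back to back, a fragment list, an HTC
 * header, the HTT command header and the TX_FRM command. It is pushed to
 * the copy engine together with the frame payload as a two-element
 * scatter-gather list, bypassing the regular HTC tx path.
 */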
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr;
	u32 frags_paddr;
	bool use_frags;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	/* Since HTT 3.0 there is no separate mgmt tx command. However in case
	 * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a
	 * fragment list the host driver specifies the frame pointer directly. */
	use_frags = htt->target_version_major < 3 ||
		    !ieee80211_is_mgmt(hdr->frame_control);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txbuf;

	if (likely(use_frags)) {
		frags = skb_cb->htt.txbuf->frags;

		frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
		frags[0].len = __cpu_to_le32(msdu->len);
		frags[1].paddr = 0;
		frags[1].len = 0;

		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->htt.txbuf_paddr;
	} else {
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->paddr;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}