/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"
/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
		sizeof(struct iwl_mvm_add_sta_cmd) :
		sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
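
/*
 * Note on iwl_mvm_add_sta_cmd_size() above: since the new API only appended
 * fields at the end of the command, firmware with the older API never reads
 * past the v7-sized prefix, so one command struct serves both API versions.
 */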

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}
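
/*
 * Note: IWL_MVM_STATION_COUNT doubles as the "no free station id" sentinel
 * here; callers (e.g. iwl_mvm_add_sta()) must check for it before using the
 * returned value as an index into fw_id_to_mac_id.
 */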

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
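
/*
 * Note: the session timer is armed for twice the negotiated BA timeout (in
 * TUs). If any frame arrived in the meantime (last_rx moved forward), the
 * handler above simply re-arms the timer instead of tearing the session down.
 */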

static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	s8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	spin_lock_bh(&mvm->queue_info_lock);
	/* Unmap MAC queues and TIDs from this queue */
	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	spin_unlock_bh(&mvm->queue_info_lock);

	return disable_agg_tids;
}
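
/*
 * Note: the bitmap returned above tells the caller which TIDs still had an
 * active aggregation on the queue, so it can invalidate them in the firmware
 * (via iwl_mvm_invalidate_sta_queue()) before reusing the queue.
 */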

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[queue].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}
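
/*
 * Example of the "flipped" AC numbering handled above: IEEE80211_AC_VO is 0
 * and IEEE80211_AC_BK is 3, so redirecting a BK (3) TID off a queue marked
 * BE (2) triggers because 3 > 2, even though BK is the *lower* priority.
 */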

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		struct iwl_scd_txq_cfg_cmd cmd = {
			.scd_queue = queue,
			.action = SCD_CFG_DISABLE_QUEUE,
		};
		u8 ac;

		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

		spin_lock_bh(&mvm->queue_info_lock);
		ac = mvm->queue_info[queue].mac80211_ac;
		cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
		cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
		cmd.tid = mvm->queue_info[queue].txq_tid;
		spin_unlock_bh(&mvm->queue_info_lock);

		/* Disable the queue */
		if (disable_agg_tids)
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		iwl_trans_txq_disable(mvm->trans, queue, false);
		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
					   &cmd);
		if (ret) {
			IWL_ERR(mvm,
				"Failed to free inactive queue %d (ret=%d)\n",
				queue, ret);

			/* Re-mark the inactive queue as inactive */
			spin_lock_bh(&mvm->queue_info_lock);
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
			spin_unlock_bh(&mvm->queue_info_lock);

			return ret;
		}

		/* If TXQ is allocated to another STA, update removal in FW */
		if (cmd.sta_id != mvmsta->sta_id)
			iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	s8 sta_id;
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	s8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}
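
/*
 * Note: IWL_MAX_TID_COUNT (8) is used by the driver as a pseudo-TID for
 * non-QoS/management traffic; it maps to AC_VO above, matching the last
 * entry of tid_to_mac80211_ac.
 */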

static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (or allocated) */
	mvm->queue_info[mvm_sta->reserved_queue].status =
		IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		cfg.tid = i;
		cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
		cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
				 txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-mapping sta %d tid %d to queue %d\n",
				    mvm_sta->sta_id, i, txq_id);

		iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
				   IEEE80211_SEQ_TO_SN(tid_data->seq_number),
				   &cfg, wdg_timeout);

		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
	}

	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* In DQA mode, if this is a HW restart, re-alloc existing queues */
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/*
	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
	 * and then they'll be allocated dynamically
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
		iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}

int iwl_mvm_update_sta(struct iwl_mvm *mvm,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only: if the
	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
	 * it up.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	if ((vif->type == NL80211_IFTYPE_STATION &&
	     mvmvif->ap_sta_id == mvm_sta->sta_id) ||
	    iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
		if (ret)
			return ret;
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
		if (ret)
			return ret;
		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
						    mvm_sta->tfd_queue_msk);
		if (ret)
			return ret;
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

		/* If DQA is supported - the queues can be disabled now */
		if (iwl_mvm_is_dqa_supported(mvm))
			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

		if (vif->type == NL80211_IFTYPE_STATION &&
		    mvmvif->ap_sta_id == mvm_sta->sta_id) {
			/* if associated - we can't remove the AP STA now */
			if (vif->bss_conf.assoc)
				return ret;

			/* unassoc - go ahead - remove the AP STA now */
			mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

			/* clear d0i3_ap_sta_id if no longer relevant */
			if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
				mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
		}
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	/*
	 * There are frames pending on the AC queues for this station.
	 * We need to wait until all the frames are drained...
	 */
	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
				   ERR_PTR(-EBUSY));
		spin_unlock_bh(&mvm_sta->lock);

		/* disable TDLS sta queues on drain complete */
		if (sta->tdls) {
			mvm->tfd_drained[mvm_sta->sta_id] =
							mvm_sta->tfd_queue_msk;
			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
				       mvm_sta->sta_id);
		}

		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	} else {
		spin_unlock_bh(&mvm_sta->lock);

		if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
			iwl_mvm_tdls_sta_deinit(mvm, sta);

		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
	}

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
				    struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_STATION_COUNT;
}
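
/*
 * Note: internal stations are parked in fw_id_to_mac_id as ERR_PTR(-EINVAL)
 * rather than a real ieee80211_sta pointer (see iwl_mvm_allocate_int_sta()),
 * which is why iterators and iwl_mvm_rm_sta_common() treat error values as
 * "internal station" markers.
 */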

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));

	cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_is_dqa_supported(mvm))
		iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED);
	if (ret)
		return ret;

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_MCAST,
			.sta_id = mvm->aux_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
				   wdg_timeout);
	}

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
	return ret;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);
	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					  mvmvif->id, 0);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_VO,
			.sta_id = mvmvif->bcast_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};
		unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
		int queue;

		if ((vif->type == NL80211_IFTYPE_AP) &&
		    (mvmvif->bcast_sta.tfd_queue_msk &
		     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
			queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
			 (mvmvif->bcast_sta.tfd_queue_msk &
			  BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
			queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
		else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
			return -EINVAL;

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
				   wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
		return -ENOSPC;

	return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					  mvmvif->id, mvmvif->color);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 qmask = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_is_dqa_supported(mvm))
		qmask = iwl_mvm_mac_get_queues_mask(vif);

	if (vif->type == NL80211_IFTYPE_AP) {
		/*
		 * The firmware defines the TFD queue mask to only be relevant
		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
		 * be included.
		 */
		qmask &= ~BIT(vif->cab_queue);

		if (iwl_mvm_is_dqa_supported(mvm))
			qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
	} else if (iwl_mvm_is_dqa_supported(mvm) &&
		   vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
	}

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
					ieee80211_vif_type_p2p(vif));
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add. */
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16
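
/*
 * Note: this is a driver-side cap on concurrent RX BA (ADDBA) sessions;
 * iwl_mvm_sta_rx_agg() below refuses new sessions once rx_ba_sessions
 * reaches it.
 */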

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}
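
/*
 * Note: with the new RX API each RX queue gets its own reorder buffer for
 * a BA session, since frames of one session can be processed on several
 * queues in parallel; the per-queue loops in iwl_mvm_free_reorder() above
 * and iwl_mvm_init_reorder_buffer() below exist for that reason.
 */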

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					int sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		init_timer(&baid_data->session_timer);
		baid_data->session_timer.function =
			iwl_mvm_rx_agg_session_expired;
		baid_data->session_timer.data =
			(unsigned long)&mvm->baid_map[baid];
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
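
/*
 * Tell the firmware to enable or disable TX aggregation for @tid on
 * @queue by modifying the station entry (TFD queue mask and per-TID
 * aggregation-disable bits).
 */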
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		if (!iwl_mvm_is_dqa_supported(mvm))
			mvm_sta->tfd_queue_msk &= ~BIT(queue);
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};
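
/*
 * Illustration of the mappings above (standard 802.11 UP-to-AC rules):
 * TIDs 1/2 map to background, 0/3 to best effort, 4/5 to video and
 * 6/7 to voice, e.g. tid_to_mac80211_ac[5] == IEEE80211_AC_VI.
 */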
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *	one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as in
	 *	non-DQA mode, since the TXQ hasn't yet been allocated
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	} else if (!iwl_mvm_is_dqa_supported(mvm) ||
	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 mvm->first_agg_queue,
						 mvm->last_agg_queue);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
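
/*
 * Called once the ADDBA handshake has completed and the TX BA session
 * becomes operational: (re)configure the TX queue for aggregation with
 * the negotiated window size and update the rate-scaling frame limit.
 */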
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
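
/*
 * Gracefully stop a TX BA session. If frames for this RA/TID are still
 * pending in the hardware queue, the state machine waits for them to
 * drain (IWL_EMPTYING_HW_QUEUE_DELBA) before fully turning agg off.
 */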
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;

	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
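
/*
 * Forcibly tear down a TX BA session, flushing any frames still on the
 * aggregation queue instead of waiting for them to drain.
 */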
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
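
/*
 * Worked example (illustrative): with offsets 2 and 5 unused and
 * fw_key_deleted[2] == 4, fw_key_deleted[5] == 9, offset 5 is picked
 * since it was freed longest ago, minimizing reuse of a recently
 * deleted hardware key slot.
 */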
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = key_offset;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
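
/*
 * Note on the receive_seq_cnt packing above: mac80211 stores the IGTK
 * PN big-endian (pn[0] is the most significant byte), so e.g. an IPN of
 * {0x00, 0x00, 0x00, 0x00, 0x01, 0x02} becomes the 48-bit value 0x0102
 * before the cpu_to_le64() conversion for the firmware.
 */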
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
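
/*
 * Install a key for @sta (or for the AP station if @sta is NULL). If
 * @key_offset is STA_KEY_IDX_INVALID, a free hardware key slot is
 * allocated; otherwise the caller-supplied offset is reused (as during
 * HW_RESTART or D3 entry, see the comment inside).
 */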
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
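
/*
 * Accounting example for the sleep-frame release below (illustrative):
 * with cnt = 4 and two agg TIDs holding 1 and 2 queued frames,
 * remaining ends up as 4 - 1 - 2 = 1 and sleep_tx_count =
 * cnt - remaining = 3, so the firmware only releases the 3 frames that
 * actually exist. If a TID holds more frames than remain in the service
 * period, more_data is set instead.
 */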
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if the
	 * all queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}