1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
35 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 Intel Deutschland GmbH
38 * All rights reserved.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66 *****************************************************************************/
67 #include <net/mac80211.h>
74 * New version of ADD_STA_sta command added new fields at the end of the
75 * structure, so sending the size of the relevant API's structure is enough to
76 * support both API versions.
78 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm
*mvm
)
80 return iwl_mvm_has_new_rx_api(mvm
) ?
81 sizeof(struct iwl_mvm_add_sta_cmd
) :
82 sizeof(struct iwl_mvm_add_sta_cmd_v7
);
85 static int iwl_mvm_find_free_sta_id(struct iwl_mvm
*mvm
,
86 enum nl80211_iftype iftype
)
91 BUILD_BUG_ON(IWL_MVM_STATION_COUNT
> 32);
92 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART
, &mvm
->status
));
94 lockdep_assert_held(&mvm
->mutex
);
96 /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
97 if (iftype
!= NL80211_IFTYPE_STATION
)
98 reserved_ids
= BIT(0);
100 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
101 for (sta_id
= 0; sta_id
< IWL_MVM_STATION_COUNT
; sta_id
++) {
102 if (BIT(sta_id
) & reserved_ids
)
105 if (!rcu_dereference_protected(mvm
->fw_id_to_mac_id
[sta_id
],
106 lockdep_is_held(&mvm
->mutex
)))
109 return IWL_MVM_STATION_COUNT
;
112 /* send station add/update command to firmware */
113 int iwl_mvm_sta_send_to_fw(struct iwl_mvm
*mvm
, struct ieee80211_sta
*sta
,
114 bool update
, unsigned int flags
)
116 struct iwl_mvm_sta
*mvm_sta
= iwl_mvm_sta_from_mac80211(sta
);
117 struct iwl_mvm_add_sta_cmd add_sta_cmd
= {
118 .sta_id
= mvm_sta
->sta_id
,
119 .mac_id_n_color
= cpu_to_le32(mvm_sta
->mac_id_n_color
),
120 .add_modify
= update
? 1 : 0,
121 .station_flags_msk
= cpu_to_le32(STA_FLG_FAT_EN_MSK
|
122 STA_FLG_MIMO_EN_MSK
),
123 .tid_disable_tx
= cpu_to_le16(mvm_sta
->tid_disable_agg
),
127 u32 agg_size
= 0, mpdu_dens
= 0;
129 if (!update
|| (flags
& STA_MODIFY_QUEUES
)) {
130 add_sta_cmd
.tfd_queue_msk
= cpu_to_le32(mvm_sta
->tfd_queue_msk
);
131 memcpy(&add_sta_cmd
.addr
, sta
->addr
, ETH_ALEN
);
133 if (flags
& STA_MODIFY_QUEUES
)
134 add_sta_cmd
.modify_mask
|= STA_MODIFY_QUEUES
;
137 switch (sta
->bandwidth
) {
138 case IEEE80211_STA_RX_BW_160
:
139 add_sta_cmd
.station_flags
|= cpu_to_le32(STA_FLG_FAT_EN_160MHZ
);
141 case IEEE80211_STA_RX_BW_80
:
142 add_sta_cmd
.station_flags
|= cpu_to_le32(STA_FLG_FAT_EN_80MHZ
);
144 case IEEE80211_STA_RX_BW_40
:
145 add_sta_cmd
.station_flags
|= cpu_to_le32(STA_FLG_FAT_EN_40MHZ
);
147 case IEEE80211_STA_RX_BW_20
:
148 if (sta
->ht_cap
.ht_supported
)
149 add_sta_cmd
.station_flags
|=
150 cpu_to_le32(STA_FLG_FAT_EN_20MHZ
);
154 switch (sta
->rx_nss
) {
156 add_sta_cmd
.station_flags
|= cpu_to_le32(STA_FLG_MIMO_EN_SISO
);
159 add_sta_cmd
.station_flags
|= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2
);
162 add_sta_cmd
.station_flags
|= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3
);
166 switch (sta
->smps_mode
) {
167 case IEEE80211_SMPS_AUTOMATIC
:
168 case IEEE80211_SMPS_NUM_MODES
:
171 case IEEE80211_SMPS_STATIC
:
173 add_sta_cmd
.station_flags
&= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK
);
174 add_sta_cmd
.station_flags
|= cpu_to_le32(STA_FLG_MIMO_EN_SISO
);
176 case IEEE80211_SMPS_DYNAMIC
:
177 add_sta_cmd
.station_flags
|= cpu_to_le32(STA_FLG_RTS_MIMO_PROT
);
179 case IEEE80211_SMPS_OFF
:
184 if (sta
->ht_cap
.ht_supported
) {
185 add_sta_cmd
.station_flags_msk
|=
186 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK
|
187 STA_FLG_AGG_MPDU_DENS_MSK
);
189 mpdu_dens
= sta
->ht_cap
.ampdu_density
;
192 if (sta
->vht_cap
.vht_supported
) {
193 agg_size
= sta
->vht_cap
.cap
&
194 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK
;
196 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT
;
197 } else if (sta
->ht_cap
.ht_supported
) {
198 agg_size
= sta
->ht_cap
.ampdu_factor
;
201 add_sta_cmd
.station_flags
|=
202 cpu_to_le32(agg_size
<< STA_FLG_MAX_AGG_SIZE_SHIFT
);
203 add_sta_cmd
.station_flags
|=
204 cpu_to_le32(mpdu_dens
<< STA_FLG_AGG_MPDU_DENS_SHIFT
);
206 status
= ADD_STA_SUCCESS
;
207 ret
= iwl_mvm_send_cmd_pdu_status(mvm
, ADD_STA
,
208 iwl_mvm_add_sta_cmd_size(mvm
),
209 &add_sta_cmd
, &status
);
213 switch (status
& IWL_ADD_STA_STATUS_MASK
) {
214 case ADD_STA_SUCCESS
:
215 IWL_DEBUG_ASSOC(mvm
, "ADD_STA PASSED\n");
219 IWL_ERR(mvm
, "ADD_STA failed\n");
226 static void iwl_mvm_rx_agg_session_expired(unsigned long data
)
228 struct iwl_mvm_baid_data __rcu
**rcu_ptr
= (void *)data
;
229 struct iwl_mvm_baid_data
*ba_data
;
230 struct ieee80211_sta
*sta
;
231 struct iwl_mvm_sta
*mvm_sta
;
232 unsigned long timeout
;
236 ba_data
= rcu_dereference(*rcu_ptr
);
238 if (WARN_ON(!ba_data
))
241 if (!ba_data
->timeout
)
244 timeout
= ba_data
->last_rx
+ TU_TO_JIFFIES(ba_data
->timeout
* 2);
245 if (time_is_after_jiffies(timeout
)) {
246 mod_timer(&ba_data
->session_timer
, timeout
);
251 sta
= rcu_dereference(ba_data
->mvm
->fw_id_to_mac_id
[ba_data
->sta_id
]);
252 mvm_sta
= iwl_mvm_sta_from_mac80211(sta
);
253 ieee80211_stop_rx_ba_session_offl(mvm_sta
->vif
,
254 sta
->addr
, ba_data
->tid
);
259 static int iwl_mvm_tdls_sta_init(struct iwl_mvm
*mvm
,
260 struct ieee80211_sta
*sta
)
262 unsigned long used_hw_queues
;
263 struct iwl_mvm_sta
*mvmsta
= iwl_mvm_sta_from_mac80211(sta
);
264 unsigned int wdg_timeout
=
265 iwl_mvm_get_wd_timeout(mvm
, NULL
, true, false);
268 lockdep_assert_held(&mvm
->mutex
);
270 used_hw_queues
= iwl_mvm_get_used_hw_queues(mvm
, NULL
);
272 /* Find available queues, and allocate them to the ACs */
273 for (ac
= 0; ac
< IEEE80211_NUM_ACS
; ac
++) {
274 u8 queue
= find_first_zero_bit(&used_hw_queues
,
275 mvm
->first_agg_queue
);
277 if (queue
>= mvm
->first_agg_queue
) {
278 IWL_ERR(mvm
, "Failed to allocate STA queue\n");
282 __set_bit(queue
, &used_hw_queues
);
283 mvmsta
->hw_queue
[ac
] = queue
;
286 /* Found a place for all queues - enable them */
287 for (ac
= 0; ac
< IEEE80211_NUM_ACS
; ac
++) {
288 iwl_mvm_enable_ac_txq(mvm
, mvmsta
->hw_queue
[ac
],
289 mvmsta
->hw_queue
[ac
],
290 iwl_mvm_ac_to_tx_fifo
[ac
], 0,
292 mvmsta
->tfd_queue_msk
|= BIT(mvmsta
->hw_queue
[ac
]);
298 static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm
*mvm
,
299 struct ieee80211_sta
*sta
)
301 struct iwl_mvm_sta
*mvmsta
= iwl_mvm_sta_from_mac80211(sta
);
302 unsigned long sta_msk
;
305 lockdep_assert_held(&mvm
->mutex
);
307 /* disable the TDLS STA-specific queues */
308 sta_msk
= mvmsta
->tfd_queue_msk
;
309 for_each_set_bit(i
, &sta_msk
, sizeof(sta_msk
) * BITS_PER_BYTE
)
310 iwl_mvm_disable_txq(mvm
, i
, i
, IWL_MAX_TID_COUNT
, 0);
313 /* Disable aggregations for a bitmap of TIDs for a given station */
314 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm
*mvm
, int queue
,
315 unsigned long disable_agg_tids
,
318 struct iwl_mvm_add_sta_cmd cmd
= {};
319 struct ieee80211_sta
*sta
;
320 struct iwl_mvm_sta
*mvmsta
;
325 spin_lock_bh(&mvm
->queue_info_lock
);
326 sta_id
= mvm
->queue_info
[queue
].ra_sta_id
;
327 spin_unlock_bh(&mvm
->queue_info_lock
);
331 sta
= rcu_dereference(mvm
->fw_id_to_mac_id
[sta_id
]);
333 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta
))) {
338 mvmsta
= iwl_mvm_sta_from_mac80211(sta
);
340 mvmsta
->tid_disable_agg
|= disable_agg_tids
;
342 cmd
.mac_id_n_color
= cpu_to_le32(mvmsta
->mac_id_n_color
);
343 cmd
.sta_id
= mvmsta
->sta_id
;
344 cmd
.add_modify
= STA_MODE_MODIFY
;
345 cmd
.modify_mask
= STA_MODIFY_QUEUES
;
346 if (disable_agg_tids
)
347 cmd
.modify_mask
|= STA_MODIFY_TID_DISABLE_TX
;
349 cmd
.modify_mask
|= STA_MODIFY_QUEUE_REMOVAL
;
350 cmd
.tfd_queue_msk
= cpu_to_le32(mvmsta
->tfd_queue_msk
);
351 cmd
.tid_disable_tx
= cpu_to_le16(mvmsta
->tid_disable_agg
);
355 /* Notify FW of queue removal from the STA queues */
356 status
= ADD_STA_SUCCESS
;
357 ret
= iwl_mvm_send_cmd_pdu_status(mvm
, ADD_STA
,
358 iwl_mvm_add_sta_cmd_size(mvm
),
364 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm
*mvm
, int queue
)
366 struct ieee80211_sta
*sta
;
367 struct iwl_mvm_sta
*mvmsta
;
368 unsigned long tid_bitmap
;
369 unsigned long agg_tids
= 0;
373 lockdep_assert_held(&mvm
->mutex
);
375 spin_lock_bh(&mvm
->queue_info_lock
);
376 sta_id
= mvm
->queue_info
[queue
].ra_sta_id
;
377 tid_bitmap
= mvm
->queue_info
[queue
].tid_bitmap
;
378 spin_unlock_bh(&mvm
->queue_info_lock
);
380 sta
= rcu_dereference_protected(mvm
->fw_id_to_mac_id
[sta_id
],
381 lockdep_is_held(&mvm
->mutex
));
383 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta
)))
386 mvmsta
= iwl_mvm_sta_from_mac80211(sta
);
388 spin_lock_bh(&mvmsta
->lock
);
389 for_each_set_bit(tid
, &tid_bitmap
, IWL_MAX_TID_COUNT
+ 1) {
390 if (mvmsta
->tid_data
[tid
].state
== IWL_AGG_ON
)
391 agg_tids
|= BIT(tid
);
393 spin_unlock_bh(&mvmsta
->lock
);
399 * Remove a queue from a station's resources.
400 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
401 * doesn't disable the queue
403 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm
*mvm
, int queue
)
405 struct ieee80211_sta
*sta
;
406 struct iwl_mvm_sta
*mvmsta
;
407 unsigned long tid_bitmap
;
408 unsigned long disable_agg_tids
= 0;
412 lockdep_assert_held(&mvm
->mutex
);
414 spin_lock_bh(&mvm
->queue_info_lock
);
415 sta_id
= mvm
->queue_info
[queue
].ra_sta_id
;
416 tid_bitmap
= mvm
->queue_info
[queue
].tid_bitmap
;
417 spin_unlock_bh(&mvm
->queue_info_lock
);
421 sta
= rcu_dereference(mvm
->fw_id_to_mac_id
[sta_id
]);
423 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta
))) {
428 mvmsta
= iwl_mvm_sta_from_mac80211(sta
);
430 spin_lock_bh(&mvmsta
->lock
);
431 /* Unmap MAC queues and TIDs from this queue */
432 for_each_set_bit(tid
, &tid_bitmap
, IWL_MAX_TID_COUNT
+ 1) {
433 if (mvmsta
->tid_data
[tid
].state
== IWL_AGG_ON
)
434 disable_agg_tids
|= BIT(tid
);
435 mvmsta
->tid_data
[tid
].txq_id
= IEEE80211_INVAL_HW_QUEUE
;
438 mvmsta
->tfd_queue_msk
&= ~BIT(queue
); /* Don't use this queue anymore */
439 spin_unlock_bh(&mvmsta
->lock
);
443 spin_lock_bh(&mvm
->queue_info_lock
);
444 /* Unmap MAC queues and TIDs from this queue */
445 mvm
->queue_info
[queue
].hw_queue_to_mac80211
= 0;
446 mvm
->queue_info
[queue
].hw_queue_refcount
= 0;
447 mvm
->queue_info
[queue
].tid_bitmap
= 0;
448 spin_unlock_bh(&mvm
->queue_info_lock
);
450 return disable_agg_tids
;
453 static int iwl_mvm_get_shared_queue(struct iwl_mvm
*mvm
,
454 unsigned long tfd_queue_mask
, u8 ac
)
457 u8 ac_to_queue
[IEEE80211_NUM_ACS
];
460 lockdep_assert_held(&mvm
->queue_info_lock
);
462 memset(&ac_to_queue
, IEEE80211_INVAL_HW_QUEUE
, sizeof(ac_to_queue
));
464 /* See what ACs the existing queues for this STA have */
465 for_each_set_bit(i
, &tfd_queue_mask
, IWL_MVM_DQA_MAX_DATA_QUEUE
) {
466 /* Only DATA queues can be shared */
467 if (i
< IWL_MVM_DQA_MIN_DATA_QUEUE
&&
468 i
!= IWL_MVM_DQA_BSS_CLIENT_QUEUE
)
471 ac_to_queue
[mvm
->queue_info
[i
].mac80211_ac
] = i
;
475 * The queue to share is chosen only from DATA queues as follows (in
476 * descending priority):
479 * 3. Highest AC queue that is lower than new AC
480 * 4. Any existing AC (there always is at least 1 DATA queue)
483 /* Priority 1: An AC_BE queue */
484 if (ac_to_queue
[IEEE80211_AC_BE
] != IEEE80211_INVAL_HW_QUEUE
)
485 queue
= ac_to_queue
[IEEE80211_AC_BE
];
486 /* Priority 2: Same AC queue */
487 else if (ac_to_queue
[ac
] != IEEE80211_INVAL_HW_QUEUE
)
488 queue
= ac_to_queue
[ac
];
489 /* Priority 3a: If new AC is VO and VI exists - use VI */
490 else if (ac
== IEEE80211_AC_VO
&&
491 ac_to_queue
[IEEE80211_AC_VI
] != IEEE80211_INVAL_HW_QUEUE
)
492 queue
= ac_to_queue
[IEEE80211_AC_VI
];
493 /* Priority 3b: No BE so only AC less than the new one is BK */
494 else if (ac_to_queue
[IEEE80211_AC_BK
] != IEEE80211_INVAL_HW_QUEUE
)
495 queue
= ac_to_queue
[IEEE80211_AC_BK
];
496 /* Priority 4a: No BE nor BK - use VI if exists */
497 else if (ac_to_queue
[IEEE80211_AC_VI
] != IEEE80211_INVAL_HW_QUEUE
)
498 queue
= ac_to_queue
[IEEE80211_AC_VI
];
499 /* Priority 4b: No BE, BK nor VI - use VO if exists */
500 else if (ac_to_queue
[IEEE80211_AC_VO
] != IEEE80211_INVAL_HW_QUEUE
)
501 queue
= ac_to_queue
[IEEE80211_AC_VO
];
503 /* Make sure queue found (or not) is legal */
504 if (!((queue
>= IWL_MVM_DQA_MIN_MGMT_QUEUE
&&
505 queue
<= IWL_MVM_DQA_MAX_MGMT_QUEUE
) ||
506 (queue
>= IWL_MVM_DQA_MIN_DATA_QUEUE
&&
507 queue
<= IWL_MVM_DQA_MAX_DATA_QUEUE
) ||
508 (queue
== IWL_MVM_DQA_BSS_CLIENT_QUEUE
))) {
509 IWL_ERR(mvm
, "No DATA queues available to share\n");
517 * If a given queue has a higher AC than the TID stream that is being added to
518 * it, the queue needs to be redirected to the lower AC. This function does that
519 * in such a case, otherwise - if no redirection required - it does nothing,
520 * unless the %force param is true.
522 static int iwl_mvm_scd_queue_redirect(struct iwl_mvm
*mvm
, int queue
, int tid
,
523 int ac
, int ssn
, unsigned int wdg_timeout
,
526 struct iwl_scd_txq_cfg_cmd cmd
= {
535 * If the AC is lower than current one - FIFO needs to be redirected to
536 * the lowest one of the streams in the queue. Check if this is needed
538 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
539 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
540 * we need to check if the numerical value of X is LARGER than of Y.
542 spin_lock_bh(&mvm
->queue_info_lock
);
543 if (ac
<= mvm
->queue_info
[queue
].mac80211_ac
&& !force
) {
544 spin_unlock_bh(&mvm
->queue_info_lock
);
546 IWL_DEBUG_TX_QUEUES(mvm
,
547 "No redirection needed on TXQ #%d\n",
552 cmd
.sta_id
= mvm
->queue_info
[queue
].ra_sta_id
;
553 cmd
.tx_fifo
= iwl_mvm_ac_to_tx_fifo
[mvm
->queue_info
[queue
].mac80211_ac
];
554 mq
= mvm
->queue_info
[queue
].hw_queue_to_mac80211
;
555 shared_queue
= (mvm
->queue_info
[queue
].hw_queue_refcount
> 1);
556 spin_unlock_bh(&mvm
->queue_info_lock
);
558 IWL_DEBUG_TX_QUEUES(mvm
, "Redirecting shared TXQ #%d to FIFO #%d\n",
559 queue
, iwl_mvm_ac_to_tx_fifo
[ac
]);
561 /* Stop MAC queues and wait for this queue to empty */
562 iwl_mvm_stop_mac_queues(mvm
, mq
);
563 ret
= iwl_trans_wait_tx_queue_empty(mvm
->trans
, BIT(queue
));
565 IWL_ERR(mvm
, "Error draining queue %d before reconfig\n",
571 /* Before redirecting the queue we need to de-activate it */
572 iwl_trans_txq_disable(mvm
->trans
, queue
, false);
573 ret
= iwl_mvm_send_cmd_pdu(mvm
, SCD_QUEUE_CFG
, 0, sizeof(cmd
), &cmd
);
575 IWL_ERR(mvm
, "Failed SCD disable TXQ %d (ret=%d)\n", queue
,
578 /* Make sure the SCD wrptr is correctly set before reconfiguring */
579 iwl_trans_txq_enable(mvm
->trans
, queue
, iwl_mvm_ac_to_tx_fifo
[ac
],
580 cmd
.sta_id
, tid
, LINK_QUAL_AGG_FRAME_LIMIT_DEF
,
583 /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
585 /* Redirect to lower AC */
586 iwl_mvm_reconfig_scd(mvm
, queue
, iwl_mvm_ac_to_tx_fifo
[ac
],
587 cmd
.sta_id
, tid
, LINK_QUAL_AGG_FRAME_LIMIT_DEF
,
590 /* Update AC marking of the queue */
591 spin_lock_bh(&mvm
->queue_info_lock
);
592 mvm
->queue_info
[queue
].mac80211_ac
= ac
;
593 spin_unlock_bh(&mvm
->queue_info_lock
);
596 * Mark queue as shared in transport if shared
597 * Note this has to be done after queue enablement because enablement
598 * can also set this value, and there is no indication there to shared
602 iwl_trans_txq_set_shared_mode(mvm
->trans
, queue
, true);
605 /* Continue using the MAC queues */
606 iwl_mvm_start_mac_queues(mvm
, mq
);
611 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm
*mvm
,
612 struct ieee80211_sta
*sta
, u8 ac
, int tid
,
613 struct ieee80211_hdr
*hdr
)
615 struct iwl_mvm_sta
*mvmsta
= iwl_mvm_sta_from_mac80211(sta
);
616 struct iwl_trans_txq_scd_cfg cfg
= {
617 .fifo
= iwl_mvm_ac_to_tx_fifo
[ac
],
618 .sta_id
= mvmsta
->sta_id
,
620 .frame_limit
= IWL_FRAME_LIMIT
,
622 unsigned int wdg_timeout
=
623 iwl_mvm_get_wd_timeout(mvm
, mvmsta
->vif
, false, false);
624 u8 mac_queue
= mvmsta
->vif
->hw_queue
[ac
];
626 bool using_inactive_queue
= false;
627 unsigned long disable_agg_tids
= 0;
628 enum iwl_mvm_agg_state queue_state
;
629 bool shared_queue
= false;
631 unsigned long tfd_queue_mask
;
634 lockdep_assert_held(&mvm
->mutex
);
636 spin_lock_bh(&mvmsta
->lock
);
637 tfd_queue_mask
= mvmsta
->tfd_queue_msk
;
638 spin_unlock_bh(&mvmsta
->lock
);
640 spin_lock_bh(&mvm
->queue_info_lock
);
643 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
646 if (!ieee80211_is_data_qos(hdr
->frame_control
) ||
647 ieee80211_is_qos_nullfunc(hdr
->frame_control
)) {
648 queue
= iwl_mvm_find_free_queue(mvm
, mvmsta
->sta_id
,
649 IWL_MVM_DQA_MIN_MGMT_QUEUE
,
650 IWL_MVM_DQA_MAX_MGMT_QUEUE
);
651 if (queue
>= IWL_MVM_DQA_MIN_MGMT_QUEUE
)
652 IWL_DEBUG_TX_QUEUES(mvm
, "Found free MGMT queue #%d\n",
655 /* If no such queue is found, we'll use a DATA queue instead */
658 if ((queue
< 0 && mvmsta
->reserved_queue
!= IEEE80211_INVAL_HW_QUEUE
) &&
659 (mvm
->queue_info
[mvmsta
->reserved_queue
].status
==
660 IWL_MVM_QUEUE_RESERVED
||
661 mvm
->queue_info
[mvmsta
->reserved_queue
].status
==
662 IWL_MVM_QUEUE_INACTIVE
)) {
663 queue
= mvmsta
->reserved_queue
;
664 mvm
->queue_info
[queue
].reserved
= true;
665 IWL_DEBUG_TX_QUEUES(mvm
, "Using reserved queue #%d\n", queue
);
669 queue
= iwl_mvm_find_free_queue(mvm
, mvmsta
->sta_id
,
670 IWL_MVM_DQA_MIN_DATA_QUEUE
,
671 IWL_MVM_DQA_MAX_DATA_QUEUE
);
674 * Check if this queue is already allocated but inactive.
675 * In such a case, we'll need to first free this queue before enabling
676 * it again, so we'll mark it as reserved to make sure no new traffic
680 mvm
->queue_info
[queue
].status
== IWL_MVM_QUEUE_INACTIVE
) {
681 mvm
->queue_info
[queue
].status
= IWL_MVM_QUEUE_RESERVED
;
682 using_inactive_queue
= true;
683 IWL_DEBUG_TX_QUEUES(mvm
,
684 "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
685 queue
, mvmsta
->sta_id
, tid
);
688 /* No free queue - we'll have to share */
690 queue
= iwl_mvm_get_shared_queue(mvm
, tfd_queue_mask
, ac
);
693 mvm
->queue_info
[queue
].status
= IWL_MVM_QUEUE_SHARED
;
698 * Mark TXQ as ready, even though it hasn't been fully configured yet,
699 * to make sure no one else takes it.
700 * This will allow avoiding re-acquiring the lock at the end of the
701 * configuration. On error we'll mark it back as free.
703 if ((queue
> 0) && !shared_queue
)
704 mvm
->queue_info
[queue
].status
= IWL_MVM_QUEUE_READY
;
706 spin_unlock_bh(&mvm
->queue_info_lock
);
708 /* This shouldn't happen - out of queues */
709 if (WARN_ON(queue
<= 0)) {
710 IWL_ERR(mvm
, "No available queues for tid %d on sta_id %d\n",
716 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
717 * but for configuring the SCD to send A-MPDUs we need to mark the queue
719 * Mark all DATA queues as allowing to be aggregated at some point
721 cfg
.aggregate
= (queue
>= IWL_MVM_DQA_MIN_DATA_QUEUE
||
722 queue
== IWL_MVM_DQA_BSS_CLIENT_QUEUE
);
725 * If this queue was previously inactive (idle) - we need to free it
728 if (using_inactive_queue
) {
729 struct iwl_scd_txq_cfg_cmd cmd
= {
735 disable_agg_tids
= iwl_mvm_remove_sta_queue_marking(mvm
, queue
);
737 spin_lock_bh(&mvm
->queue_info_lock
);
738 ac
= mvm
->queue_info
[queue
].mac80211_ac
;
739 cmd
.sta_id
= mvm
->queue_info
[queue
].ra_sta_id
;
740 cmd
.tx_fifo
= iwl_mvm_ac_to_tx_fifo
[ac
];
741 spin_unlock_bh(&mvm
->queue_info_lock
);
743 /* Disable the queue */
744 iwl_mvm_invalidate_sta_queue(mvm
, queue
, disable_agg_tids
,
746 iwl_trans_txq_disable(mvm
->trans
, queue
, false);
747 ret
= iwl_mvm_send_cmd_pdu(mvm
, SCD_QUEUE_CFG
, 0, sizeof(cmd
),
751 "Failed to free inactive queue %d (ret=%d)\n",
754 /* Re-mark the inactive queue as inactive */
755 spin_lock_bh(&mvm
->queue_info_lock
);
756 mvm
->queue_info
[queue
].status
= IWL_MVM_QUEUE_INACTIVE
;
757 spin_unlock_bh(&mvm
->queue_info_lock
);
763 IWL_DEBUG_TX_QUEUES(mvm
,
764 "Allocating %squeue #%d to sta %d on tid %d\n",
765 shared_queue
? "shared " : "", queue
,
766 mvmsta
->sta_id
, tid
);
769 /* Disable any open aggs on this queue */
770 disable_agg_tids
= iwl_mvm_get_queue_agg_tids(mvm
, queue
);
772 if (disable_agg_tids
) {
773 IWL_DEBUG_TX_QUEUES(mvm
, "Disabling aggs on queue %d\n",
775 iwl_mvm_invalidate_sta_queue(mvm
, queue
,
776 disable_agg_tids
, false);
780 ssn
= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr
->seq_ctrl
));
781 iwl_mvm_enable_txq(mvm
, queue
, mac_queue
, ssn
, &cfg
,
785 * Mark queue as shared in transport if shared
786 * Note this has to be done after queue enablement because enablement
787 * can also set this value, and there is no indication there to shared
791 iwl_trans_txq_set_shared_mode(mvm
->trans
, queue
, true);
793 spin_lock_bh(&mvmsta
->lock
);
794 mvmsta
->tid_data
[tid
].txq_id
= queue
;
795 mvmsta
->tid_data
[tid
].is_tid_active
= true;
796 mvmsta
->tfd_queue_msk
|= BIT(queue
);
797 queue_state
= mvmsta
->tid_data
[tid
].state
;
799 if (mvmsta
->reserved_queue
== queue
)
800 mvmsta
->reserved_queue
= IEEE80211_INVAL_HW_QUEUE
;
801 spin_unlock_bh(&mvmsta
->lock
);
804 ret
= iwl_mvm_sta_send_to_fw(mvm
, sta
, true, STA_MODIFY_QUEUES
);
808 /* If we need to re-enable aggregations... */
809 if (queue_state
== IWL_AGG_ON
) {
810 ret
= iwl_mvm_sta_tx_agg(mvm
, sta
, tid
, queue
, true);
815 /* Redirect queue, if needed */
816 ret
= iwl_mvm_scd_queue_redirect(mvm
, queue
, tid
, ac
, ssn
,
825 iwl_mvm_disable_txq(mvm
, queue
, mac_queue
, tid
, 0);
830 static inline u8
iwl_mvm_tid_to_ac_queue(int tid
)
832 if (tid
== IWL_MAX_TID_COUNT
)
833 return IEEE80211_AC_VO
; /* MGMT */
835 return tid_to_mac80211_ac
[tid
];
838 static void iwl_mvm_tx_deferred_stream(struct iwl_mvm
*mvm
,
839 struct ieee80211_sta
*sta
, int tid
)
841 struct iwl_mvm_sta
*mvmsta
= iwl_mvm_sta_from_mac80211(sta
);
842 struct iwl_mvm_tid_data
*tid_data
= &mvmsta
->tid_data
[tid
];
844 struct ieee80211_hdr
*hdr
;
845 struct sk_buff_head deferred_tx
;
847 bool no_queue
= false; /* Marks if there is a problem with the queue */
850 lockdep_assert_held(&mvm
->mutex
);
852 skb
= skb_peek(&tid_data
->deferred_tx_frames
);
855 hdr
= (void *)skb
->data
;
857 ac
= iwl_mvm_tid_to_ac_queue(tid
);
858 mac_queue
= IEEE80211_SKB_CB(skb
)->hw_queue
;
860 if (tid_data
->txq_id
== IEEE80211_INVAL_HW_QUEUE
&&
861 iwl_mvm_sta_alloc_queue(mvm
, sta
, ac
, tid
, hdr
)) {
863 "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
864 mvmsta
->sta_id
, tid
);
867 * Mark queue as problematic so later the deferred traffic is
868 * freed, as we can do nothing with it
873 __skb_queue_head_init(&deferred_tx
);
875 /* Disable bottom-halves when entering TX path */
877 spin_lock(&mvmsta
->lock
);
878 skb_queue_splice_init(&tid_data
->deferred_tx_frames
, &deferred_tx
);
879 spin_unlock(&mvmsta
->lock
);
881 while ((skb
= __skb_dequeue(&deferred_tx
)))
882 if (no_queue
|| iwl_mvm_tx_skb(mvm
, skb
, sta
))
883 ieee80211_free_txskb(mvm
->hw
, skb
);
887 iwl_mvm_start_mac_queues(mvm
, BIT(mac_queue
));
890 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct
*wk
)
892 struct iwl_mvm
*mvm
= container_of(wk
, struct iwl_mvm
,
894 struct ieee80211_sta
*sta
;
895 struct iwl_mvm_sta
*mvmsta
;
896 unsigned long deferred_tid_traffic
;
899 /* Check inactivity of queues */
900 iwl_mvm_inactivity_check(mvm
);
902 mutex_lock(&mvm
->mutex
);
904 /* Go over all stations with deferred traffic */
905 for_each_set_bit(sta_id
, mvm
->sta_deferred_frames
,
906 IWL_MVM_STATION_COUNT
) {
907 clear_bit(sta_id
, mvm
->sta_deferred_frames
);
908 sta
= rcu_dereference_protected(mvm
->fw_id_to_mac_id
[sta_id
],
909 lockdep_is_held(&mvm
->mutex
));
910 if (IS_ERR_OR_NULL(sta
))
913 mvmsta
= iwl_mvm_sta_from_mac80211(sta
);
914 deferred_tid_traffic
= mvmsta
->deferred_traffic_tid_map
;
916 for_each_set_bit(tid
, &deferred_tid_traffic
,
917 IWL_MAX_TID_COUNT
+ 1)
918 iwl_mvm_tx_deferred_stream(mvm
, sta
, tid
);
921 mutex_unlock(&mvm
->mutex
);
924 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm
*mvm
,
925 struct ieee80211_sta
*sta
,
926 enum nl80211_iftype vif_type
)
928 struct iwl_mvm_sta
*mvmsta
= iwl_mvm_sta_from_mac80211(sta
);
932 * Check for inactive queues, so we don't reach a situation where we
933 * can't add a STA due to a shortage in queues that doesn't really exist
935 iwl_mvm_inactivity_check(mvm
);
937 spin_lock_bh(&mvm
->queue_info_lock
);
939 /* Make sure we have free resources for this STA */
940 if (vif_type
== NL80211_IFTYPE_STATION
&& !sta
->tdls
&&
941 !mvm
->queue_info
[IWL_MVM_DQA_BSS_CLIENT_QUEUE
].hw_queue_refcount
&&
942 (mvm
->queue_info
[IWL_MVM_DQA_BSS_CLIENT_QUEUE
].status
==
944 queue
= IWL_MVM_DQA_BSS_CLIENT_QUEUE
;
946 queue
= iwl_mvm_find_free_queue(mvm
, mvmsta
->sta_id
,
947 IWL_MVM_DQA_MIN_DATA_QUEUE
,
948 IWL_MVM_DQA_MAX_DATA_QUEUE
);
950 spin_unlock_bh(&mvm
->queue_info_lock
);
951 IWL_ERR(mvm
, "No available queues for new station\n");
954 mvm
->queue_info
[queue
].status
= IWL_MVM_QUEUE_RESERVED
;
956 spin_unlock_bh(&mvm
->queue_info_lock
);
958 mvmsta
->reserved_queue
= queue
;
960 IWL_DEBUG_TX_QUEUES(mvm
, "Reserving data queue #%d for sta_id %d\n",
961 queue
, mvmsta
->sta_id
);
966 int iwl_mvm_add_sta(struct iwl_mvm
*mvm
,
967 struct ieee80211_vif
*vif
,
968 struct ieee80211_sta
*sta
)
970 struct iwl_mvm_vif
*mvmvif
= iwl_mvm_vif_from_mac80211(vif
);
971 struct iwl_mvm_sta
*mvm_sta
= iwl_mvm_sta_from_mac80211(sta
);
972 struct iwl_mvm_rxq_dup_data
*dup_data
;
975 lockdep_assert_held(&mvm
->mutex
);
977 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART
, &mvm
->status
))
978 sta_id
= iwl_mvm_find_free_sta_id(mvm
,
979 ieee80211_vif_type_p2p(vif
));
981 sta_id
= mvm_sta
->sta_id
;
983 if (sta_id
== IWL_MVM_STATION_COUNT
)
986 spin_lock_init(&mvm_sta
->lock
);
988 mvm_sta
->sta_id
= sta_id
;
989 mvm_sta
->mac_id_n_color
= FW_CMD_ID_AND_COLOR(mvmvif
->id
,
992 mvm_sta
->max_agg_bufsize
= LINK_QUAL_AGG_FRAME_LIMIT_DEF
;
993 mvm_sta
->tx_protection
= 0;
994 mvm_sta
->tt_tx_protection
= false;
996 /* HW restart, don't assume the memory has been zeroed */
997 atomic_set(&mvm
->pending_frames
[sta_id
], 0);
998 mvm_sta
->tid_disable_agg
= 0xffff; /* No aggs at first */
999 mvm_sta
->tfd_queue_msk
= 0;
1002 * Allocate new queues for a TDLS station, unless we're in DQA mode,
1003 * and then they'll be allocated dynamically
1005 if (!iwl_mvm_is_dqa_supported(mvm
) && sta
->tdls
) {
1006 ret
= iwl_mvm_tdls_sta_init(mvm
, sta
);
1009 } else if (!iwl_mvm_is_dqa_supported(mvm
)) {
1010 for (i
= 0; i
< IEEE80211_NUM_ACS
; i
++)
1011 if (vif
->hw_queue
[i
] != IEEE80211_INVAL_HW_QUEUE
)
1012 mvm_sta
->tfd_queue_msk
|= BIT(vif
->hw_queue
[i
]);
1015 /* for HW restart - reset everything but the sequence number */
1016 for (i
= 0; i
<= IWL_MAX_TID_COUNT
; i
++) {
1017 u16 seq
= mvm_sta
->tid_data
[i
].seq_number
;
1018 memset(&mvm_sta
->tid_data
[i
], 0, sizeof(mvm_sta
->tid_data
[i
]));
1019 mvm_sta
->tid_data
[i
].seq_number
= seq
;
1021 if (!iwl_mvm_is_dqa_supported(mvm
))
1025 * Mark all queues for this STA as unallocated and defer TX
1026 * frames until the queue is allocated
1028 mvm_sta
->tid_data
[i
].txq_id
= IEEE80211_INVAL_HW_QUEUE
;
1029 skb_queue_head_init(&mvm_sta
->tid_data
[i
].deferred_tx_frames
);
1031 mvm_sta
->deferred_traffic_tid_map
= 0;
1032 mvm_sta
->agg_tids
= 0;
1034 if (iwl_mvm_has_new_rx_api(mvm
) &&
1035 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART
, &mvm
->status
)) {
1036 dup_data
= kcalloc(mvm
->trans
->num_rx_queues
,
1041 mvm_sta
->dup_data
= dup_data
;
1044 if (iwl_mvm_is_dqa_supported(mvm
)) {
1045 ret
= iwl_mvm_reserve_sta_stream(mvm
, sta
,
1046 ieee80211_vif_type_p2p(vif
));
1051 ret
= iwl_mvm_sta_send_to_fw(mvm
, sta
, false, 0);
1055 if (vif
->type
== NL80211_IFTYPE_STATION
) {
1057 WARN_ON(mvmvif
->ap_sta_id
!= IWL_MVM_STATION_COUNT
);
1058 mvmvif
->ap_sta_id
= sta_id
;
1060 WARN_ON(mvmvif
->ap_sta_id
== IWL_MVM_STATION_COUNT
);
1064 rcu_assign_pointer(mvm
->fw_id_to_mac_id
[sta_id
], sta
);
1069 if (!iwl_mvm_is_dqa_supported(mvm
) && sta
->tdls
)
1070 iwl_mvm_tdls_sta_deinit(mvm
, sta
);
1074 int iwl_mvm_update_sta(struct iwl_mvm
*mvm
,
1075 struct ieee80211_vif
*vif
,
1076 struct ieee80211_sta
*sta
)
1078 return iwl_mvm_sta_send_to_fw(mvm
, sta
, true, 0);
1081 int iwl_mvm_drain_sta(struct iwl_mvm
*mvm
, struct iwl_mvm_sta
*mvmsta
,
1084 struct iwl_mvm_add_sta_cmd cmd
= {};
1088 lockdep_assert_held(&mvm
->mutex
);
1090 cmd
.mac_id_n_color
= cpu_to_le32(mvmsta
->mac_id_n_color
);
1091 cmd
.sta_id
= mvmsta
->sta_id
;
1092 cmd
.add_modify
= STA_MODE_MODIFY
;
1093 cmd
.station_flags
= drain
? cpu_to_le32(STA_FLG_DRAIN_FLOW
) : 0;
1094 cmd
.station_flags_msk
= cpu_to_le32(STA_FLG_DRAIN_FLOW
);
1096 status
= ADD_STA_SUCCESS
;
1097 ret
= iwl_mvm_send_cmd_pdu_status(mvm
, ADD_STA
,
1098 iwl_mvm_add_sta_cmd_size(mvm
),
1103 switch (status
& IWL_ADD_STA_STATUS_MASK
) {
1104 case ADD_STA_SUCCESS
:
1105 IWL_DEBUG_INFO(mvm
, "Frames for staid %d will drained in fw\n",
1110 IWL_ERR(mvm
, "Couldn't drain frames for staid %d\n",
1119 * Remove a station from the FW table. Before sending the command to remove
1120 * the station validate that the station is indeed known to the driver (sanity
1123 static int iwl_mvm_rm_sta_common(struct iwl_mvm
*mvm
, u8 sta_id
)
1125 struct ieee80211_sta
*sta
;
1126 struct iwl_mvm_rm_sta_cmd rm_sta_cmd
= {
1131 sta
= rcu_dereference_protected(mvm
->fw_id_to_mac_id
[sta_id
],
1132 lockdep_is_held(&mvm
->mutex
));
1134 /* Note: internal stations are marked as error values */
1136 IWL_ERR(mvm
, "Invalid station id\n");
1140 ret
= iwl_mvm_send_cmd_pdu(mvm
, REMOVE_STA
, 0,
1141 sizeof(rm_sta_cmd
), &rm_sta_cmd
);
1143 IWL_ERR(mvm
, "Failed to remove station. Id=%d\n", sta_id
);
1150 void iwl_mvm_sta_drained_wk(struct work_struct
*wk
)
1152 struct iwl_mvm
*mvm
= container_of(wk
, struct iwl_mvm
, sta_drained_wk
);
1156 * The mutex is needed because of the SYNC cmd, but not only: if the
1157 * work would run concurrently with iwl_mvm_rm_sta, it would run before
1158 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
1159 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
1162 mutex_lock(&mvm
->mutex
);
1164 for_each_set_bit(sta_id
, mvm
->sta_drained
, IWL_MVM_STATION_COUNT
) {
1166 struct ieee80211_sta
*sta
=
1167 rcu_dereference_protected(mvm
->fw_id_to_mac_id
[sta_id
],
1168 lockdep_is_held(&mvm
->mutex
));
1171 * This station is in use or RCU-removed; the latter happens in
1172 * managed mode, where mac80211 removes the station before we
1173 * can remove it from firmware (we can only do that after the
1174 * MAC is marked unassociated), and possibly while the deauth
1175 * frame to disconnect from the AP is still queued. Then, the
1176 * station pointer is -ENOENT when the last skb is reclaimed.
1178 if (!IS_ERR(sta
) || PTR_ERR(sta
) == -ENOENT
)
1181 if (PTR_ERR(sta
) == -EINVAL
) {
1182 IWL_ERR(mvm
, "Drained sta %d, but it is internal?\n",
1188 IWL_ERR(mvm
, "Drained sta %d, but it was NULL?\n",
1193 WARN_ON(PTR_ERR(sta
) != -EBUSY
);
1194 /* This station was removed and we waited until it got drained,
1195 * we can now proceed and remove it.
1197 ret
= iwl_mvm_rm_sta_common(mvm
, sta_id
);
1200 "Couldn't remove sta %d after it was drained\n",
1204 RCU_INIT_POINTER(mvm
->fw_id_to_mac_id
[sta_id
], NULL
);
1205 clear_bit(sta_id
, mvm
->sta_drained
);
1207 if (mvm
->tfd_drained
[sta_id
]) {
1208 unsigned long i
, msk
= mvm
->tfd_drained
[sta_id
];
1210 for_each_set_bit(i
, &msk
, sizeof(msk
) * BITS_PER_BYTE
)
1211 iwl_mvm_disable_txq(mvm
, i
, i
,
1212 IWL_MAX_TID_COUNT
, 0);
1214 mvm
->tfd_drained
[sta_id
] = 0;
1215 IWL_DEBUG_TDLS(mvm
, "Drained sta %d, with queues %ld\n",
1220 mutex_unlock(&mvm
->mutex
);
1223 static void iwl_mvm_disable_sta_queues(struct iwl_mvm
*mvm
,
1224 struct ieee80211_vif
*vif
,
1225 struct iwl_mvm_sta
*mvm_sta
)
1230 lockdep_assert_held(&mvm
->mutex
);
1232 for (i
= 0; i
< ARRAY_SIZE(mvm_sta
->tid_data
); i
++) {
1233 if (mvm_sta
->tid_data
[i
].txq_id
== IEEE80211_INVAL_HW_QUEUE
)
1236 ac
= iwl_mvm_tid_to_ac_queue(i
);
1237 iwl_mvm_disable_txq(mvm
, mvm_sta
->tid_data
[i
].txq_id
,
1238 vif
->hw_queue
[ac
], i
, 0);
1239 mvm_sta
->tid_data
[i
].txq_id
= IEEE80211_INVAL_HW_QUEUE
;
1243 int iwl_mvm_rm_sta(struct iwl_mvm
*mvm
,
1244 struct ieee80211_vif
*vif
,
1245 struct ieee80211_sta
*sta
)
1247 struct iwl_mvm_vif
*mvmvif
= iwl_mvm_vif_from_mac80211(vif
);
1248 struct iwl_mvm_sta
*mvm_sta
= iwl_mvm_sta_from_mac80211(sta
);
1251 lockdep_assert_held(&mvm
->mutex
);
1253 if (iwl_mvm_has_new_rx_api(mvm
))
1254 kfree(mvm_sta
->dup_data
);
1256 if ((vif
->type
== NL80211_IFTYPE_STATION
&&
1257 mvmvif
->ap_sta_id
== mvm_sta
->sta_id
) ||
1258 iwl_mvm_is_dqa_supported(mvm
)){
1259 ret
= iwl_mvm_drain_sta(mvm
, mvm_sta
, true);
1262 /* flush its queues here since we are freeing mvm_sta */
1263 ret
= iwl_mvm_flush_tx_path(mvm
, mvm_sta
->tfd_queue_msk
, 0);
1266 ret
= iwl_trans_wait_tx_queue_empty(mvm
->trans
,
1267 mvm_sta
->tfd_queue_msk
);
1270 ret
= iwl_mvm_drain_sta(mvm
, mvm_sta
, false);
1272 /* If DQA is supported - the queues can be disabled now */
1273 if (iwl_mvm_is_dqa_supported(mvm
))
1274 iwl_mvm_disable_sta_queues(mvm
, vif
, mvm_sta
);
1276 if (vif
->type
== NL80211_IFTYPE_STATION
&&
1277 mvmvif
->ap_sta_id
== mvm_sta
->sta_id
) {
1278 /* if associated - we can't remove the AP STA now */
1279 if (vif
->bss_conf
.assoc
)
1282 /* unassoc - go ahead - remove the AP STA now */
1283 mvmvif
->ap_sta_id
= IWL_MVM_STATION_COUNT
;
1285 /* clear d0i3_ap_sta_id if no longer relevant */
1286 if (mvm
->d0i3_ap_sta_id
== mvm_sta
->sta_id
)
1287 mvm
->d0i3_ap_sta_id
= IWL_MVM_STATION_COUNT
;
1292 * This shouldn't happen - the TDLS channel switch should be canceled
1293 * before the STA is removed.
1295 if (WARN_ON_ONCE(mvm
->tdls_cs
.peer
.sta_id
== mvm_sta
->sta_id
)) {
1296 mvm
->tdls_cs
.peer
.sta_id
= IWL_MVM_STATION_COUNT
;
1297 cancel_delayed_work(&mvm
->tdls_cs
.dwork
);
1301 * Make sure that the tx response code sees the station as -EBUSY and
1302 * calls the drain worker.
1304 spin_lock_bh(&mvm_sta
->lock
);
1306 * There are frames pending on the AC queues for this station.
1307 * We need to wait until all the frames are drained...
1309 if (atomic_read(&mvm
->pending_frames
[mvm_sta
->sta_id
])) {
1310 rcu_assign_pointer(mvm
->fw_id_to_mac_id
[mvm_sta
->sta_id
],
1312 spin_unlock_bh(&mvm_sta
->lock
);
1314 /* disable TDLS sta queues on drain complete */
1316 mvm
->tfd_drained
[mvm_sta
->sta_id
] =
1317 mvm_sta
->tfd_queue_msk
;
1318 IWL_DEBUG_TDLS(mvm
, "Draining TDLS sta %d\n",
1322 ret
= iwl_mvm_drain_sta(mvm
, mvm_sta
, true);
1324 spin_unlock_bh(&mvm_sta
->lock
);
1326 if (!iwl_mvm_is_dqa_supported(mvm
) && sta
->tdls
)
1327 iwl_mvm_tdls_sta_deinit(mvm
, sta
);
1329 ret
= iwl_mvm_rm_sta_common(mvm
, mvm_sta
->sta_id
);
1330 RCU_INIT_POINTER(mvm
->fw_id_to_mac_id
[mvm_sta
->sta_id
], NULL
);
1336 int iwl_mvm_rm_sta_id(struct iwl_mvm
*mvm
,
1337 struct ieee80211_vif
*vif
,
1340 int ret
= iwl_mvm_rm_sta_common(mvm
, sta_id
);
1342 lockdep_assert_held(&mvm
->mutex
);
1344 RCU_INIT_POINTER(mvm
->fw_id_to_mac_id
[sta_id
], NULL
);
1348 int iwl_mvm_allocate_int_sta(struct iwl_mvm
*mvm
,
1349 struct iwl_mvm_int_sta
*sta
,
1350 u32 qmask
, enum nl80211_iftype iftype
)
1352 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART
, &mvm
->status
)) {
1353 sta
->sta_id
= iwl_mvm_find_free_sta_id(mvm
, iftype
);
1354 if (WARN_ON_ONCE(sta
->sta_id
== IWL_MVM_STATION_COUNT
))
1358 sta
->tfd_queue_msk
= qmask
;
1360 /* put a non-NULL value so iterating over the stations won't stop */
1361 rcu_assign_pointer(mvm
->fw_id_to_mac_id
[sta
->sta_id
], ERR_PTR(-EINVAL
));
1365 static void iwl_mvm_dealloc_int_sta(struct iwl_mvm
*mvm
,
1366 struct iwl_mvm_int_sta
*sta
)
1368 RCU_INIT_POINTER(mvm
->fw_id_to_mac_id
[sta
->sta_id
], NULL
);
1369 memset(sta
, 0, sizeof(struct iwl_mvm_int_sta
));
1370 sta
->sta_id
= IWL_MVM_STATION_COUNT
;
1373 static int iwl_mvm_add_int_sta_common(struct iwl_mvm
*mvm
,
1374 struct iwl_mvm_int_sta
*sta
,
1376 u16 mac_id
, u16 color
)
1378 struct iwl_mvm_add_sta_cmd cmd
;
1382 lockdep_assert_held(&mvm
->mutex
);
1384 memset(&cmd
, 0, sizeof(cmd
));
1385 cmd
.sta_id
= sta
->sta_id
;
1386 cmd
.mac_id_n_color
= cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id
,
1389 cmd
.tfd_queue_msk
= cpu_to_le32(sta
->tfd_queue_msk
);
1390 cmd
.tid_disable_tx
= cpu_to_le16(0xffff);
1393 memcpy(cmd
.addr
, addr
, ETH_ALEN
);
1395 ret
= iwl_mvm_send_cmd_pdu_status(mvm
, ADD_STA
,
1396 iwl_mvm_add_sta_cmd_size(mvm
),
1401 switch (status
& IWL_ADD_STA_STATUS_MASK
) {
1402 case ADD_STA_SUCCESS
:
1403 IWL_DEBUG_INFO(mvm
, "Internal station added.\n");
1407 IWL_ERR(mvm
, "Add internal station failed, status=0x%x\n",
1414 int iwl_mvm_add_aux_sta(struct iwl_mvm
*mvm
)
1416 unsigned int wdg_timeout
= iwlmvm_mod_params
.tfd_q_hang_detect
?
1417 mvm
->cfg
->base_params
->wd_timeout
:
1418 IWL_WATCHDOG_DISABLED
;
1421 lockdep_assert_held(&mvm
->mutex
);
1423 /* Map Aux queue to fifo - needs to happen before adding Aux station */
1424 if (!iwl_mvm_is_dqa_supported(mvm
))
1425 iwl_mvm_enable_ac_txq(mvm
, mvm
->aux_queue
, mvm
->aux_queue
,
1426 IWL_MVM_TX_FIFO_MCAST
, 0, wdg_timeout
);
1428 /* Allocate aux station and assign to it the aux queue */
1429 ret
= iwl_mvm_allocate_int_sta(mvm
, &mvm
->aux_sta
, BIT(mvm
->aux_queue
),
1430 NL80211_IFTYPE_UNSPECIFIED
);
1434 if (iwl_mvm_is_dqa_supported(mvm
)) {
1435 struct iwl_trans_txq_scd_cfg cfg
= {
1436 .fifo
= IWL_MVM_TX_FIFO_MCAST
,
1437 .sta_id
= mvm
->aux_sta
.sta_id
,
1438 .tid
= IWL_MAX_TID_COUNT
,
1440 .frame_limit
= IWL_FRAME_LIMIT
,
1443 iwl_mvm_enable_txq(mvm
, mvm
->aux_queue
, mvm
->aux_queue
, 0, &cfg
,
1447 ret
= iwl_mvm_add_int_sta_common(mvm
, &mvm
->aux_sta
, NULL
,
1451 iwl_mvm_dealloc_int_sta(mvm
, &mvm
->aux_sta
);
1455 int iwl_mvm_add_snif_sta(struct iwl_mvm
*mvm
, struct ieee80211_vif
*vif
)
1457 struct iwl_mvm_vif
*mvmvif
= iwl_mvm_vif_from_mac80211(vif
);
1459 lockdep_assert_held(&mvm
->mutex
);
1460 return iwl_mvm_add_int_sta_common(mvm
, &mvm
->snif_sta
, vif
->addr
,
1464 int iwl_mvm_rm_snif_sta(struct iwl_mvm
*mvm
, struct ieee80211_vif
*vif
)
1468 lockdep_assert_held(&mvm
->mutex
);
1470 ret
= iwl_mvm_rm_sta_common(mvm
, mvm
->snif_sta
.sta_id
);
1472 IWL_WARN(mvm
, "Failed sending remove station\n");
1477 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm
*mvm
)
1479 iwl_mvm_dealloc_int_sta(mvm
, &mvm
->snif_sta
);
1482 void iwl_mvm_del_aux_sta(struct iwl_mvm
*mvm
)
1484 lockdep_assert_held(&mvm
->mutex
);
1486 iwl_mvm_dealloc_int_sta(mvm
, &mvm
->aux_sta
);
1490 * Send the add station command for the vif's broadcast station.
1491 * Assumes that the station was already allocated.
1493 * @mvm: the mvm component
1494 * @vif: the interface to which the broadcast station is added
1495 * @bsta: the broadcast station to add.
1497 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm
*mvm
, struct ieee80211_vif
*vif
)
1499 struct iwl_mvm_vif
*mvmvif
= iwl_mvm_vif_from_mac80211(vif
);
1500 struct iwl_mvm_int_sta
*bsta
= &mvmvif
->bcast_sta
;
1501 static const u8 _baddr
[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1502 const u8
*baddr
= _baddr
;
1504 lockdep_assert_held(&mvm
->mutex
);
1506 if (iwl_mvm_is_dqa_supported(mvm
)) {
1507 struct iwl_trans_txq_scd_cfg cfg
= {
1508 .fifo
= IWL_MVM_TX_FIFO_VO
,
1509 .sta_id
= mvmvif
->bcast_sta
.sta_id
,
1510 .tid
= IWL_MAX_TID_COUNT
,
1512 .frame_limit
= IWL_FRAME_LIMIT
,
1514 unsigned int wdg_timeout
=
1515 iwl_mvm_get_wd_timeout(mvm
, vif
, false, false);
1518 if ((vif
->type
== NL80211_IFTYPE_AP
) &&
1519 (mvmvif
->bcast_sta
.tfd_queue_msk
&
1520 BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE
)))
1521 queue
= IWL_MVM_DQA_AP_PROBE_RESP_QUEUE
;
1522 else if ((vif
->type
== NL80211_IFTYPE_P2P_DEVICE
) &&
1523 (mvmvif
->bcast_sta
.tfd_queue_msk
&
1524 BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE
)))
1525 queue
= IWL_MVM_DQA_P2P_DEVICE_QUEUE
;
1526 else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
1529 iwl_mvm_enable_txq(mvm
, queue
, vif
->hw_queue
[0], 0, &cfg
,
1533 if (vif
->type
== NL80211_IFTYPE_ADHOC
)
1534 baddr
= vif
->bss_conf
.bssid
;
1536 if (WARN_ON_ONCE(bsta
->sta_id
== IWL_MVM_STATION_COUNT
))
1539 return iwl_mvm_add_int_sta_common(mvm
, bsta
, baddr
,
1540 mvmvif
->id
, mvmvif
->color
);
1543 /* Send the FW a request to remove the station from it's internal data
1544 * structures, but DO NOT remove the entry from the local data structures. */
1545 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm
*mvm
, struct ieee80211_vif
*vif
)
1547 struct iwl_mvm_vif
*mvmvif
= iwl_mvm_vif_from_mac80211(vif
);
1550 lockdep_assert_held(&mvm
->mutex
);
1552 ret
= iwl_mvm_rm_sta_common(mvm
, mvmvif
->bcast_sta
.sta_id
);
1554 IWL_WARN(mvm
, "Failed sending remove station\n");
1558 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm
*mvm
, struct ieee80211_vif
*vif
)
1560 struct iwl_mvm_vif
*mvmvif
= iwl_mvm_vif_from_mac80211(vif
);
1563 lockdep_assert_held(&mvm
->mutex
);
1565 if (!iwl_mvm_is_dqa_supported(mvm
))
1566 qmask
= iwl_mvm_mac_get_queues_mask(vif
);
1568 if (vif
->type
== NL80211_IFTYPE_AP
) {
1570 * The firmware defines the TFD queue mask to only be relevant
1571 * for *unicast* queues, so the multicast (CAB) queue shouldn't
1574 qmask
&= ~BIT(vif
->cab_queue
);
1576 if (iwl_mvm_is_dqa_supported(mvm
))
1577 qmask
|= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE
);
1578 } else if (iwl_mvm_is_dqa_supported(mvm
) &&
1579 vif
->type
== NL80211_IFTYPE_P2P_DEVICE
) {
1580 qmask
|= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE
);
1583 return iwl_mvm_allocate_int_sta(mvm
, &mvmvif
->bcast_sta
, qmask
,
1584 ieee80211_vif_type_p2p(vif
));
1587 /* Allocate a new station entry for the broadcast station to the given vif,
1588 * and send it to the FW.
1589 * Note that each P2P mac should have its own broadcast station.
1591 * @mvm: the mvm component
1592 * @vif: the interface to which the broadcast station is added
1593 * @bsta: the broadcast station to add. */
1594 int iwl_mvm_add_bcast_sta(struct iwl_mvm
*mvm
, struct ieee80211_vif
*vif
)
1596 struct iwl_mvm_vif
*mvmvif
= iwl_mvm_vif_from_mac80211(vif
);
1597 struct iwl_mvm_int_sta
*bsta
= &mvmvif
->bcast_sta
;
1600 lockdep_assert_held(&mvm
->mutex
);
1602 ret
= iwl_mvm_alloc_bcast_sta(mvm
, vif
);
1606 ret
= iwl_mvm_send_add_bcast_sta(mvm
, vif
);
1609 iwl_mvm_dealloc_int_sta(mvm
, bsta
);
1614 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm
*mvm
, struct ieee80211_vif
*vif
)
1616 struct iwl_mvm_vif
*mvmvif
= iwl_mvm_vif_from_mac80211(vif
);
1618 iwl_mvm_dealloc_int_sta(mvm
, &mvmvif
->bcast_sta
);
1622 * Send the FW a request to remove the station from it's internal data
1623 * structures, and in addition remove it from the local data structure.
1625 int iwl_mvm_rm_bcast_sta(struct iwl_mvm
*mvm
, struct ieee80211_vif
*vif
)
1629 lockdep_assert_held(&mvm
->mutex
);
1631 ret
= iwl_mvm_send_rm_bcast_sta(mvm
, vif
);
1633 iwl_mvm_dealloc_bcast_sta(mvm
, vif
);
1638 #define IWL_MAX_RX_BA_SESSIONS 16
1640 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm
*mvm
, u8 baid
)
1642 struct iwl_mvm_delba_notif notif
= {
1643 .metadata
.type
= IWL_MVM_RXQ_NOTIF_DEL_BA
,
1647 iwl_mvm_sync_rx_queues_internal(mvm
, (void *)¬if
, sizeof(notif
));
1650 static void iwl_mvm_free_reorder(struct iwl_mvm
*mvm
,
1651 struct iwl_mvm_baid_data
*data
)
1655 iwl_mvm_sync_rxq_del_ba(mvm
, data
->baid
);
1657 for (i
= 0; i
< mvm
->trans
->num_rx_queues
; i
++) {
1659 struct iwl_mvm_reorder_buffer
*reorder_buf
=
1660 &data
->reorder_buf
[i
];
1662 spin_lock_bh(&reorder_buf
->lock
);
1663 if (likely(!reorder_buf
->num_stored
)) {
1664 spin_unlock_bh(&reorder_buf
->lock
);
1669 * This shouldn't happen in regular DELBA since the internal
1670 * delBA notification should trigger a release of all frames in
1671 * the reorder buffer.
1675 for (j
= 0; j
< reorder_buf
->buf_size
; j
++)
1676 __skb_queue_purge(&reorder_buf
->entries
[j
]);
1678 * Prevent timer re-arm. This prevents a very far fetched case
1679 * where we timed out on the notification. There may be prior
1680 * RX frames pending in the RX queue before the notification
1681 * that might get processed between now and the actual deletion
1682 * and we would re-arm the timer although we are deleting the
1685 reorder_buf
->removed
= true;
1686 spin_unlock_bh(&reorder_buf
->lock
);
1687 del_timer_sync(&reorder_buf
->reorder_timer
);
1691 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm
*mvm
,
1693 struct iwl_mvm_baid_data
*data
,
1694 u16 ssn
, u8 buf_size
)
1698 for (i
= 0; i
< mvm
->trans
->num_rx_queues
; i
++) {
1699 struct iwl_mvm_reorder_buffer
*reorder_buf
=
1700 &data
->reorder_buf
[i
];
1703 reorder_buf
->num_stored
= 0;
1704 reorder_buf
->head_sn
= ssn
;
1705 reorder_buf
->buf_size
= buf_size
;
1706 /* rx reorder timer */
1707 reorder_buf
->reorder_timer
.function
=
1708 iwl_mvm_reorder_timer_expired
;
1709 reorder_buf
->reorder_timer
.data
= (unsigned long)reorder_buf
;
1710 init_timer(&reorder_buf
->reorder_timer
);
1711 spin_lock_init(&reorder_buf
->lock
);
1712 reorder_buf
->mvm
= mvm
;
1713 reorder_buf
->queue
= i
;
1714 reorder_buf
->sta_id
= sta_id
;
1715 for (j
= 0; j
< reorder_buf
->buf_size
; j
++)
1716 __skb_queue_head_init(&reorder_buf
->entries
[j
]);
1720 int iwl_mvm_sta_rx_agg(struct iwl_mvm
*mvm
, struct ieee80211_sta
*sta
,
1721 int tid
, u16 ssn
, bool start
, u8 buf_size
, u16 timeout
)
1723 struct iwl_mvm_sta
*mvm_sta
= iwl_mvm_sta_from_mac80211(sta
);
1724 struct iwl_mvm_add_sta_cmd cmd
= {};
1725 struct iwl_mvm_baid_data
*baid_data
= NULL
;
1729 lockdep_assert_held(&mvm
->mutex
);
1731 if (start
&& mvm
->rx_ba_sessions
>= IWL_MAX_RX_BA_SESSIONS
) {
1732 IWL_WARN(mvm
, "Not enough RX BA SESSIONS\n");
1736 if (iwl_mvm_has_new_rx_api(mvm
) && start
) {
1738 * Allocate here so if allocation fails we can bail out early
1739 * before starting the BA session in the firmware
1741 baid_data
= kzalloc(sizeof(*baid_data
) +
1742 mvm
->trans
->num_rx_queues
*
1743 sizeof(baid_data
->reorder_buf
[0]),
1749 cmd
.mac_id_n_color
= cpu_to_le32(mvm_sta
->mac_id_n_color
);
1750 cmd
.sta_id
= mvm_sta
->sta_id
;
1751 cmd
.add_modify
= STA_MODE_MODIFY
;
1753 cmd
.add_immediate_ba_tid
= (u8
) tid
;
1754 cmd
.add_immediate_ba_ssn
= cpu_to_le16(ssn
);
1755 cmd
.rx_ba_window
= cpu_to_le16((u16
)buf_size
);
1757 cmd
.remove_immediate_ba_tid
= (u8
) tid
;
1759 cmd
.modify_mask
= start
? STA_MODIFY_ADD_BA_TID
:
1760 STA_MODIFY_REMOVE_BA_TID
;
1762 status
= ADD_STA_SUCCESS
;
1763 ret
= iwl_mvm_send_cmd_pdu_status(mvm
, ADD_STA
,
1764 iwl_mvm_add_sta_cmd_size(mvm
),
1769 switch (status
& IWL_ADD_STA_STATUS_MASK
) {
1770 case ADD_STA_SUCCESS
:
1771 IWL_DEBUG_HT(mvm
, "RX BA Session %sed in fw\n",
1772 start
? "start" : "stopp");
1774 case ADD_STA_IMMEDIATE_BA_FAILURE
:
1775 IWL_WARN(mvm
, "RX BA Session refused by fw\n");
1780 IWL_ERR(mvm
, "RX BA Session failed %sing, status 0x%x\n",
1781 start
? "start" : "stopp", status
);
1791 mvm
->rx_ba_sessions
++;
1793 if (!iwl_mvm_has_new_rx_api(mvm
))
1796 if (WARN_ON(!(status
& IWL_ADD_STA_BAID_VALID_MASK
))) {
1800 baid
= (u8
)((status
& IWL_ADD_STA_BAID_MASK
) >>
1801 IWL_ADD_STA_BAID_SHIFT
);
1802 baid_data
->baid
= baid
;
1803 baid_data
->timeout
= timeout
;
1804 baid_data
->last_rx
= jiffies
;
1805 init_timer(&baid_data
->session_timer
);
1806 baid_data
->session_timer
.function
=
1807 iwl_mvm_rx_agg_session_expired
;
1808 baid_data
->session_timer
.data
=
1809 (unsigned long)&mvm
->baid_map
[baid
];
1810 baid_data
->mvm
= mvm
;
1811 baid_data
->tid
= tid
;
1812 baid_data
->sta_id
= mvm_sta
->sta_id
;
1814 mvm_sta
->tid_to_baid
[tid
] = baid
;
1816 mod_timer(&baid_data
->session_timer
,
1817 TU_TO_EXP_TIME(timeout
* 2));
1819 iwl_mvm_init_reorder_buffer(mvm
, mvm_sta
->sta_id
,
1820 baid_data
, ssn
, buf_size
);
1822 * protect the BA data with RCU to cover a case where our
1823 * internal RX sync mechanism will timeout (not that it's
1824 * supposed to happen) and we will free the session data while
1825 * RX is being processed in parallel
1827 IWL_DEBUG_HT(mvm
, "Sta %d(%d) is assigned to BAID %d\n",
1828 mvm_sta
->sta_id
, tid
, baid
);
1829 WARN_ON(rcu_access_pointer(mvm
->baid_map
[baid
]));
1830 rcu_assign_pointer(mvm
->baid_map
[baid
], baid_data
);
1832 u8 baid
= mvm_sta
->tid_to_baid
[tid
];
1834 if (mvm
->rx_ba_sessions
> 0)
1835 /* check that restart flow didn't zero the counter */
1836 mvm
->rx_ba_sessions
--;
1837 if (!iwl_mvm_has_new_rx_api(mvm
))
1840 if (WARN_ON(baid
== IWL_RX_REORDER_DATA_INVALID_BAID
))
1843 baid_data
= rcu_access_pointer(mvm
->baid_map
[baid
]);
1844 if (WARN_ON(!baid_data
))
1847 /* synchronize all rx queues so we can safely delete */
1848 iwl_mvm_free_reorder(mvm
, baid_data
);
1849 del_timer_sync(&baid_data
->session_timer
);
1850 RCU_INIT_POINTER(mvm
->baid_map
[baid
], NULL
);
1851 kfree_rcu(baid_data
, rcu_head
);
1852 IWL_DEBUG_HT(mvm
, "BAID %d is free\n", baid
);
1861 int iwl_mvm_sta_tx_agg(struct iwl_mvm
*mvm
, struct ieee80211_sta
*sta
,
1862 int tid
, u8 queue
, bool start
)
1864 struct iwl_mvm_sta
*mvm_sta
= iwl_mvm_sta_from_mac80211(sta
);
1865 struct iwl_mvm_add_sta_cmd cmd
= {};
1869 lockdep_assert_held(&mvm
->mutex
);
1872 mvm_sta
->tfd_queue_msk
|= BIT(queue
);
1873 mvm_sta
->tid_disable_agg
&= ~BIT(tid
);
1875 /* In DQA-mode the queue isn't removed on agg termination */
1876 if (!iwl_mvm_is_dqa_supported(mvm
))
1877 mvm_sta
->tfd_queue_msk
&= ~BIT(queue
);
1878 mvm_sta
->tid_disable_agg
|= BIT(tid
);
1881 cmd
.mac_id_n_color
= cpu_to_le32(mvm_sta
->mac_id_n_color
);
1882 cmd
.sta_id
= mvm_sta
->sta_id
;
1883 cmd
.add_modify
= STA_MODE_MODIFY
;
1884 cmd
.modify_mask
= STA_MODIFY_QUEUES
| STA_MODIFY_TID_DISABLE_TX
;
1885 cmd
.tfd_queue_msk
= cpu_to_le32(mvm_sta
->tfd_queue_msk
);
1886 cmd
.tid_disable_tx
= cpu_to_le16(mvm_sta
->tid_disable_agg
);
1888 status
= ADD_STA_SUCCESS
;
1889 ret
= iwl_mvm_send_cmd_pdu_status(mvm
, ADD_STA
,
1890 iwl_mvm_add_sta_cmd_size(mvm
),
1895 switch (status
& IWL_ADD_STA_STATUS_MASK
) {
1896 case ADD_STA_SUCCESS
:
1900 IWL_ERR(mvm
, "TX BA Session failed %sing, status 0x%x\n",
1901 start
? "start" : "stopp", status
);
1908 const u8 tid_to_mac80211_ac
[] = {
1917 IEEE80211_AC_VO
, /* We treat MGMT as TID 8, which is set as AC_VO */
1920 static const u8 tid_to_ucode_ac
[] = {
1931 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm
*mvm
, struct ieee80211_vif
*vif
,
1932 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
)
1934 struct iwl_mvm_sta
*mvmsta
= iwl_mvm_sta_from_mac80211(sta
);
1935 struct iwl_mvm_tid_data
*tid_data
;
1939 if (WARN_ON_ONCE(tid
>= IWL_MAX_TID_COUNT
))
1942 if (mvmsta
->tid_data
[tid
].state
!= IWL_AGG_OFF
) {
1943 IWL_ERR(mvm
, "Start AGG when state is not IWL_AGG_OFF %d!\n",
1944 mvmsta
->tid_data
[tid
].state
);
1948 lockdep_assert_held(&mvm
->mutex
);
1950 spin_lock_bh(&mvmsta
->lock
);
1952 /* possible race condition - we entered D0i3 while starting agg */
1953 if (test_bit(IWL_MVM_STATUS_IN_D0I3
, &mvm
->status
)) {
1954 spin_unlock_bh(&mvmsta
->lock
);
1955 IWL_ERR(mvm
, "Entered D0i3 while starting Tx agg\n");
1959 spin_lock_bh(&mvm
->queue_info_lock
);
1962 * Note the possible cases:
1963 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
1964 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
1965 * one and mark it as reserved
1966 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
1967 * non-DQA mode, since the TXQ hasn't yet been allocated
1969 txq_id
= mvmsta
->tid_data
[tid
].txq_id
;
1970 if (!iwl_mvm_is_dqa_supported(mvm
) ||
1971 mvm
->queue_info
[txq_id
].status
!= IWL_MVM_QUEUE_READY
) {
1972 txq_id
= iwl_mvm_find_free_queue(mvm
, mvmsta
->sta_id
,
1973 mvm
->first_agg_queue
,
1974 mvm
->last_agg_queue
);
1977 spin_unlock_bh(&mvm
->queue_info_lock
);
1978 IWL_ERR(mvm
, "Failed to allocate agg queue\n");
1982 /* TXQ hasn't yet been enabled, so mark it only as reserved */
1983 mvm
->queue_info
[txq_id
].status
= IWL_MVM_QUEUE_RESERVED
;
1985 spin_unlock_bh(&mvm
->queue_info_lock
);
1987 IWL_DEBUG_TX_QUEUES(mvm
,
1988 "AGG for tid %d will be on queue #%d\n",
1991 tid_data
= &mvmsta
->tid_data
[tid
];
1992 tid_data
->ssn
= IEEE80211_SEQ_TO_SN(tid_data
->seq_number
);
1993 tid_data
->txq_id
= txq_id
;
1994 *ssn
= tid_data
->ssn
;
1996 IWL_DEBUG_TX_QUEUES(mvm
,
1997 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
1998 mvmsta
->sta_id
, tid
, txq_id
, tid_data
->ssn
,
1999 tid_data
->next_reclaimed
);
2001 if (tid_data
->ssn
== tid_data
->next_reclaimed
) {
2002 tid_data
->state
= IWL_AGG_STARTING
;
2003 ieee80211_start_tx_ba_cb_irqsafe(vif
, sta
->addr
, tid
);
2005 tid_data
->state
= IWL_EMPTYING_HW_QUEUE_ADDBA
;
2011 spin_unlock_bh(&mvmsta
->lock
);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	u16 ssn;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;
		spin_unlock_bh(&mvm->queue_info_lock);

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
	if (ret)
		return -EIO;

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}

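/*
 * Stop an aggregation session for a TID. If frames are still pending in
 * the hardware queue, go through IWL_EMPTYING_HW_QUEUE_DELBA and let the
 * queue drain first; otherwise turn the session off immediately. A queue
 * that was only reserved (never enabled) is simply marked free again.
 */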
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

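/*
 * Flush variant of aggregation stop: force the TID state to OFF first
 * (so iwl_mvm_check_ratid_empty() won't call the mac80211 stop callback),
 * then flush and drain any frames still on the aggregation queue before
 * tearing the session down in the firmware.
 */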
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}

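/*
 * Find a free offset in the firmware key table, preferring the slot whose
 * key was deleted longest ago (tracked by the fw_key_deleted counters).
 */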
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

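/*
 * Build and send an ADD_STA_KEY command for a single key entry: translate
 * the mac80211 cipher into firmware key flags, copy the key material (and
 * the TKIP IV32/phase-1 key when relevant) and program it at the given key
 * offset for this station.
 */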
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = key_offset;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
		    (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

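/*
 * Program one key instance (pairwise or group) into the firmware. For
 * TKIP the phase-1 key is derived from the current IV32 via mac80211
 * before the command is sent; other ciphers are passed through as-is.
 */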
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}

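/*
 * Invalidate a key entry in the firmware by sending ADD_STA_KEY with the
 * STA_KEY_NOT_VALID flag for the offset the key was occupying.
 */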
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcoded the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

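/*
 * Top-level key removal: release the firmware key offset, age the
 * fw_key_deleted counters used by iwl_mvm_set_fw_key_idx(), and invalidate
 * the entry in the firmware (twice for WEP, matching the double upload
 * done at installation time).
 */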
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

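/*
 * Tell the firmware how many frames may be released to a sleeping client
 * (U-APSD service period or PS-Poll response). When releasing from
 * aggregation queues, clamp the count to what is actually queued and set
 * more_data if more frames remain than fit in the service period.
 */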
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if the
	 * all queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

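/*
 * Firmware notification that the service period for a sleeping station has
 * ended; forward it to mac80211 via ieee80211_sta_eosp().
 */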
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

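/*
 * Block or unblock Tx to a single client of our AP/GO interface, keeping
 * mac80211's buffering state in sync through ieee80211_sta_block_awake()
 * unless frames are still pending for that station.
 */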
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}