/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>

#include "iwl-notif-wait.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
#include "fw-api-scan.h"
#include "time-event.h"
#include "fw-dbg.h"

#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;

struct iwl_mvm_mod_params iwlmvm_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
	.tfd_q_hang_detect = true
	/* rest of fields are 0 by default */
};

module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
MODULE_PARM_DESC(init_dbg,
		 "set to true to debug an ASSERT in INIT fw (default: false)");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
MODULE_PARM_DESC(power_scheme,
		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
		   bool, S_IRUGO);
MODULE_PARM_DESC(tfd_q_hang_detect,
		 "TFD queues hang detection (default: true)");
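
/*
 * All three parameters are registered read-only (S_IRUGO): they appear under
 * /sys/module/iwlmvm/parameters/ but can only be set at load time, e.g.
 * "modprobe iwlmvm power_scheme=3".
 */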

/*
 * module init and exit functions
 */
static int __init iwl_mvm_init(void)
{
	int ret;

	ret = iwl_mvm_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);

	if (ret) {
		pr_err("Unable to register MVM op_mode: %d\n", ret);
		iwl_mvm_rate_control_unregister();
	}

	return ret;
}
module_init(iwl_mvm_init);

static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);

static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val = 0;
	u32 phy_config = iwl_mvm_get_phy_config(mvm);

	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
			 FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
			 FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
			 FW_PHY_CFG_RADIO_DASH_POS;
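
	/* SKU control: encode the MAC step/dash taken from the HW revision */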
	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
			CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
			CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

	/*
	 * TODO: Bits 7-8 of CSR in 8000 HW family set the ADC sampling, and
	 * shouldn't be set to any non-zero value. The same is supposed to be
	 * true of the other HW, but unsetting them (such as the 7260) causes
	 * automatic tests to fail on seemingly unrelated errors. Need to
	 * further investigate this, but for now we'll separate cases.
	 */
	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;

	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
				reg_val);

	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
		       radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not be able to obtain it back.
	 */
	if (!mvm->trans->cfg->apmg_not_supported)
		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}

struct iwl_rx_handlers {
	u16 cmd_id;
	bool async;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

#define RX_HANDLER(_cmd_id, _fn, _async)	\
	{ .cmd_id = _cmd_id, .fn = _fn, .async = _async }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }

/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, ...)
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can be SYNC - this means that it will be called in the Rx path
 * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and
 * only in this case!), it should be set as ASYNC. In that case, it will be
 * called from a worker with mvm->mutex held.
 */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),

	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
	RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true),
	RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
	RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
		   iwl_mvm_rx_ant_coupling_notif, true),

	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),

	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),

	RX_HANDLER(SCAN_ITERATION_COMPLETE,
		   iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
		   iwl_mvm_rx_lmac_scan_complete_notif, true),
	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
		   false),
	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
		   true),
	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
		   iwl_mvm_rx_umac_scan_iter_complete_notif, false),

	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),

	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
		   false),

	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
		   iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
		       iwl_mvm_temp_notif, true),

	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
		   false),
	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
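
/*
 * Note: unlike the command name arrays below (which are binary searched),
 * this handler table is walked linearly for every notification received,
 * which is why the list above asks to be kept in order of frequency.
 */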

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
	HCMD_NAME(MVM_ALIVE),
	HCMD_NAME(REPLY_ERROR),
	HCMD_NAME(INIT_COMPLETE_NOTIF),
	HCMD_NAME(PHY_CONTEXT_CMD),
	HCMD_NAME(ANTENNA_COUPLING_NOTIFICATION),
	HCMD_NAME(SCAN_CFG_CMD),
	HCMD_NAME(SCAN_REQ_UMAC),
	HCMD_NAME(SCAN_ABORT_UMAC),
	HCMD_NAME(SCAN_COMPLETE_UMAC),
	HCMD_NAME(TOF_NOTIFICATION),
	HCMD_NAME(ADD_STA_KEY),
	HCMD_NAME(REMOVE_STA),
	HCMD_NAME(FW_GET_ITEM_CMD),
	HCMD_NAME(SCD_QUEUE_CFG),
	HCMD_NAME(TXPATH_FLUSH),
	HCMD_NAME(MGMT_MCAST_KEY),
	HCMD_NAME(SHARED_MEM_CFG),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
	HCMD_NAME(MAC_CONTEXT_CMD),
	HCMD_NAME(TIME_EVENT_CMD),
	HCMD_NAME(TIME_EVENT_NOTIFICATION),
	HCMD_NAME(BINDING_CONTEXT_CMD),
	HCMD_NAME(TIME_QUOTA_CMD),
	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
	HCMD_NAME(FW_PAGING_BLOCK_CMD),
	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
	HCMD_NAME(HOT_SPOT_CMD),
	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
	HCMD_NAME(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD),
	HCMD_NAME(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD),
	HCMD_NAME(BT_COEX_UPDATE_SW_BOOST),
	HCMD_NAME(BT_COEX_UPDATE_CORUN_LUT),
	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
	HCMD_NAME(BT_COEX_CI),
	HCMD_NAME(PHY_CONFIGURATION_CMD),
	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	HCMD_NAME(SCAN_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(POWER_TABLE_CMD),
	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
	HCMD_NAME(DC2DC_CONFIG_CMD),
	HCMD_NAME(NVM_ACCESS_CMD),
	HCMD_NAME(SET_CALIB_DEFAULT_CMD),
	HCMD_NAME(BEACON_NOTIFICATION),
	HCMD_NAME(BEACON_TEMPLATE_CMD),
	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
	HCMD_NAME(BT_CONFIG),
	HCMD_NAME(STATISTICS_CMD),
	HCMD_NAME(STATISTICS_NOTIFICATION),
	HCMD_NAME(EOSP_NOTIFICATION),
	HCMD_NAME(REDUCE_TX_POWER_CMD),
	HCMD_NAME(CARD_STATE_CMD),
	HCMD_NAME(CARD_STATE_NOTIFICATION),
	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
	HCMD_NAME(TDLS_CONFIG_CMD),
	HCMD_NAME(MAC_PM_POWER_TABLE),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
	HCMD_NAME(REPLY_RX_PHY_CMD),
	HCMD_NAME(REPLY_RX_MPDU_CMD),
	HCMD_NAME(MCC_UPDATE_CMD),
	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
	HCMD_NAME(MARKER_CMD),
	HCMD_NAME(BT_COEX_PRIO_TABLE),
	HCMD_NAME(BT_COEX_PROT_ENV),
	HCMD_NAME(BT_PROFILE_NOTIFICATION),
	HCMD_NAME(BCAST_FILTER_CMD),
	HCMD_NAME(MCAST_FILTER_CMD),
	HCMD_NAME(REPLY_SF_CFG_CMD),
	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
	HCMD_NAME(D3_CONFIG_CMD),
	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(OFFLOADS_QUERY_CMD),
	HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER),
	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
	HCMD_NAME(WOWLAN_PATTERNS),
	HCMD_NAME(WOWLAN_CONFIGURATION),
	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
	HCMD_NAME(WOWLAN_TKIP_PARAM),
	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
	HCMD_NAME(WOWLAN_GET_STATUSES),
	HCMD_NAME(WOWLAN_TX_POWER_PER_DB),
	HCMD_NAME(SCAN_ITERATION_COMPLETE),
	HCMD_NAME(D0I3_END_CMD),
	HCMD_NAME(LTR_CONFIG),
	HCMD_NAME(REPLY_DEBUG_CMD),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};
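
/*
 * HCMD_NAME() pairs a command/notification id with its name string; the
 * transport layer uses these sorted per-group tables to print human-readable
 * command names in debug output, looking entries up by binary search.
 */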

static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
};

/* these forward declarations avoid having to export the functions */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
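
/*
 * Scan the per-platform Tx power backoff table (if the config provides one)
 * and return the backoff matching the transport's default power limit; the
 * result is used as the minimum backoff for thermal throttling.
 */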
static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
{
	const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;

	if (!pwr_tx_backoff)
		return 0;

	while (pwr_tx_backoff->pwr) {
		if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
			return pwr_tx_backoff->backoff;

		pwr_tx_backoff++;
	}

	return 0;
}

static void iwl_mvm_fw_error_dump_wk(struct work_struct *work);
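
/*
 * Op mode start: allocate the mac80211 hw (with the iwl_mvm private data),
 * configure the transport layer, run the INIT firmware once to read the NVM
 * and calibrations, then register with mac80211 and debugfs.
 */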
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	struct iwl_mvm *mvm;
	struct iwl_trans_config trans_cfg = {};
	static const u8 no_reclaim_cmds[] = {
		TX_CMD,
	};
	int err, scan_size;
	u32 min_backoff;

	/*
	 * We use IWL_MVM_STATION_COUNT to check the validity of the station
	 * index all over the driver - check that its value corresponds to the
	 * array size.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);

	/********************************
	 * 1. Allocating and configuring HW data
	 ********************************/
	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
				sizeof(struct iwl_mvm),
				&iwl_mvm_hw_ops);
	if (!hw)
		return NULL;

	if (cfg->max_rx_agg_size)
		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;

	if (cfg->max_tx_agg_size)
		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;

	op_mode = hw->priv;

	mvm = IWL_OP_MODE_GET_MVM(op_mode);
	mvm->dev = trans->dev;
	mvm->trans = trans;
	mvm->cfg = cfg;
	mvm->fw = fw;
	mvm->hw = hw;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		op_mode->ops = &iwl_mvm_ops_mq;
	} else {
		op_mode->ops = &iwl_mvm_ops;

		if (WARN_ON(trans->num_rx_queues > 1))
			goto out_free;
	}

	mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;

	mvm->first_agg_queue = 16;
	mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
	if (mvm->cfg->base_params->num_of_queues == 16) {
		mvm->first_agg_queue = 12;
	}
	mvm->sf_state = SF_UNINIT;
	mvm->cur_ucode = IWL_UCODE_INIT;

	mutex_init(&mvm->mutex);
	mutex_init(&mvm->d0i3_suspend_mutex);
	spin_lock_init(&mvm->async_handlers_lock);
	INIT_LIST_HEAD(&mvm->time_event_list);
	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
	INIT_LIST_HEAD(&mvm->async_handlers_list);
	spin_lock_init(&mvm->time_event_lock);
	spin_lock_init(&mvm->queue_info_lock);

	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
	INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
	INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);

	spin_lock_init(&mvm->d0i3_tx_lock);
	spin_lock_init(&mvm->refs_lock);
	skb_queue_head_init(&mvm->d0i3_tx);
	init_waitqueue_head(&mvm->d0i3_exit_waitq);

	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
	switch (iwlwifi_mod_params.amsdu_size) {
	case IWL_AMSDU_4K:
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
		break;
	case IWL_AMSDU_8K:
		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
		break;
	case IWL_AMSDU_12K:
		trans_cfg.rx_buf_size = IWL_AMSDU_12K;
		break;
	default:
		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
		       iwlwifi_mod_params.amsdu_size);
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
	}

	trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
					       IWL_UCODE_TLV_API_WIDE_CMD_HDR);

	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
		trans_cfg.bc_table_dword = true;

	trans_cfg.command_groups = iwl_mvm_groups;
	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);

	trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
	trans_cfg.scd_set_active = true;

	trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
	trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;

	/* Set a short watchdog for the command queue */
	trans_cfg.cmd_q_wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);

	snprintf(mvm->hw->wiphy->fw_version,
		 sizeof(mvm->hw->wiphy->fw_version),
		 "%s", fw->fw_version);

	/* Configure transport layer */
	iwl_trans_configure(mvm->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
	trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
	trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
	       sizeof(trans->dbg_conf_tlv));
	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;

	/* set up notification wait support */
	iwl_notification_wait_init(&mvm->notif_wait);

	mvm->phy_db = iwl_phy_db_init(trans);
	if (!mvm->phy_db) {
		IWL_ERR(mvm, "Cannot init phy_db\n");
		goto out_free;
	}

	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
		 mvm->cfg->name, mvm->trans->hw_rev);

	min_backoff = calc_min_backoff(trans, cfg);
	iwl_mvm_tt_initialize(mvm, min_backoff);

	if (iwlwifi_mod_params.nvm_file)
		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
	else
		IWL_DEBUG_EEPROM(mvm->trans->dev,
				 "working without external nvm file\n");

	if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
		 "not allowing power-up and not having nvm_file\n"))
		goto out_free;

	/*
	 * Even if the nvm exists in the nvm_file, the driver should read it
	 * again from the nic, because there might be entries that exist in
	 * the OTP and not in the file.
	 * For nics with no_power_up_nic_in_init: rely completely on nvm_file.
	 */
	if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
		err = iwl_nvm_init(mvm, false);
		if (err)
			goto out_free;
	} else {
		err = iwl_trans_start_hw(mvm->trans);
		if (err)
			goto out_free;
	}

	mutex_lock(&mvm->mutex);
	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
	err = iwl_run_init_mvm_ucode(mvm, true);
	if (!err || !iwlmvm_mod_params.init_dbg)
		iwl_trans_stop_device(trans);
	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
	mutex_unlock(&mvm->mutex);
	/* returns 0 if successful, 1 if success but in rfkill */
	if (err < 0 && !iwlmvm_mod_params.init_dbg) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
		goto out_free;
	}

	scan_size = iwl_mvm_scan_size(mvm);

	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
	if (!mvm->scan_cmd)
		goto out_free;

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	mvm->last_ebs_successful = true;

	err = iwl_mvm_mac_setup_register(mvm);
	if (err)
		goto out_free;

	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
	if (err)
		goto out_unregister;

	memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));

	/* rpm starts with a taken reference, we can release it now */
	iwl_trans_unref(mvm->trans);

	iwl_mvm_tof_init(mvm);

	return op_mode;

 out_unregister:
	ieee80211_unregister_hw(mvm->hw);
	iwl_mvm_leds_exit(mvm);
 out_free:
	flush_delayed_work(&mvm->fw_dump_wk);
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
	if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
		iwl_trans_op_mode_leave(trans);
	ieee80211_free_hw(mvm->hw);
	return NULL;
}

static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int i;

	iwl_mvm_leds_exit(mvm);

	iwl_mvm_tt_exit(mvm);

	ieee80211_unregister_hw(mvm->hw);

	kfree(mvm->scan_cmd);
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = NULL;

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
	kfree(mvm->d3_resume_sram);
#endif

	iwl_trans_op_mode_leave(mvm->trans);

	iwl_phy_db_free(mvm->phy_db);

	iwl_free_nvm_data(mvm->nvm_data);
	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
		kfree(mvm->nvm_sections[i].data);

	iwl_mvm_tof_clean(mvm);

	ieee80211_free_hw(mvm->hw);
}

struct iwl_async_handler_entry {
	struct list_head list;
	struct iwl_rx_cmd_buffer rxb;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
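
/*
 * Entries of this type are allocated (GFP_ATOMIC) in the Rx path for ASYNC
 * handlers, queued on mvm->async_handlers_list, and consumed (or purged on
 * stop) by the worker below with mvm->mutex held.
 */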
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
{
	struct iwl_async_handler_entry *entry, *tmp;

	spin_lock_bh(&mvm->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&mvm->async_handlers_lock);
}

static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, async_handlers_wk);
	struct iwl_async_handler_entry *entry, *tmp;
	struct list_head local_list;

	INIT_LIST_HEAD(&local_list);

	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
	mutex_lock(&mvm->mutex);

	/*
	 * Sync with Rx path with a lock. Remove all the entries from this
	 * list, add them to a local one (lock free), and then handle them.
	 */
	spin_lock_bh(&mvm->async_handlers_lock);
	list_splice_init(&mvm->async_handlers_list, &local_list);
	spin_unlock_bh(&mvm->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &local_list, list) {
		entry->fn(mvm, &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&mvm->mutex);
}

static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
					    struct iwl_rx_packet *pkt)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
	int i;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
	cmds_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
		return;

	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
		/* don't collect on CMD 0 */
		if (!cmds_trig->cmds[i].cmd_id)
			break;

		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
			continue;

		iwl_mvm_fw_dbg_collect_trig(mvm, trig,
					    "CMD 0x%02x.%02x received",
					    pkt->hdr.group_id, pkt->hdr.cmd);
		break;
	}
}
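
/*
 * Common Rx dispatch: check debug triggers, feed the notification-wait
 * machinery, then either run the matching SYNC handler inline or queue an
 * ASYNC handler entry for the worker.
 */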
static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_rx_packet *pkt)
{
	int i;

	iwl_mvm_rx_check_trigger(mvm, pkt);

	/*
	 * Do the notification wait before RX handlers so
	 * even if the RX handler consumes the RXB we have
	 * access to it in the notification wait entry.
	 */
	iwl_notification_wait_notify(&mvm->notif_wait, pkt);

	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		if (!rx_h->async) {
			rx_h->fn(mvm, rxb);
			return;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return;

		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		entry->fn = rx_h->fn;
		spin_lock(&mvm->async_handlers_lock);
		list_add_tail(&entry->list, &mvm->async_handlers_list);
		spin_unlock(&mvm->async_handlers_lock);
		schedule_work(&mvm->async_handlers_wk);
		break;
	}
}

static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
		       struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
	else if (pkt->hdr.cmd == FRAME_RELEASE)
		iwl_mvm_rx_frame_release(mvm, rxb, 0);
	else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
			  struct napi_struct *napi,
			  struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
	else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
		iwl_mvm_rx_phy_cmd_mq(mvm, rxb);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}
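
/*
 * A mac80211 queue may be backed by several HW queues. The stop/wake
 * handlers below keep a per-queue counter so that a mac80211 queue is only
 * woken again once every HW queue backing it has room.
 */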
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	unsigned long mq;
	int q;

	spin_lock_bh(&mvm->queue_info_lock);
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN_ON_ONCE(!mq))
		return;

	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "queue %d (mac80211 %d) already stopped\n",
					    queue, q);
			continue;
		}

		ieee80211_stop_queue(mvm->hw, q);
	}
}

static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
			     const struct iwl_device_cmd *cmd)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	/*
	 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
	 * commands that need to block the Tx queues.
	 */
	iwl_trans_block_txq_ptrs(mvm->trans, false);
}

static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	unsigned long mq;
	int q;

	spin_lock_bh(&mvm->queue_info_lock);
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN_ON_ONCE(!mq))
		return;

	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "queue %d (mac80211 %d) still stopped\n",
					    queue, q);
			continue;
		}

		ieee80211_wake_queue(mvm->hw, q);
	}
}

void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
{
	if (state)
		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);

	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
}

static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	bool calibrating = ACCESS_ONCE(mvm->calibrating);

	if (state)
		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);

	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));

	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
	if (calibrating)
		iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * Stop the device if we run OPERATIONAL firmware or if we are in the
	 * middle of the calibrations.
	 */
	return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
}

static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
	ieee80211_free_txskb(mvm->hw, skb);
}

struct iwl_mvm_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void iwl_mvm_reprobe_wk(struct work_struct *wk)
{
	struct iwl_mvm_reprobe *reprobe;

	reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	kfree(reprobe);
	module_put(THIS_MODULE);
}

static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
{
	struct iwl_mvm *mvm =
		container_of(work, struct iwl_mvm, fw_dump_wk.work);

	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
		return;

	mutex_lock(&mvm->mutex);

	/* stop recording */
	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
	} else {
		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
		/* wait before we collect the data till the DBGC stop */
		udelay(100);
	}

	iwl_mvm_fw_error_dump(mvm);

	/* start recording again if the firmware is not crashed */
	WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
		     mvm->fw->dbg_dest_tlv &&
		     iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));

	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}
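
/*
 * Decide how to recover from a firmware error: collect a dump descriptor if
 * restart is disabled, reprobe the device if the error happened while a
 * restart was already in progress, or ask mac80211 to restart the hardware.
 */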
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
	iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * This is a bit racy, but worst case we tell mac80211 about
	 * a stopped/aborted scan when that was already done which
	 * is not a problem. It is necessary to abort any os scan
	 * here because mac80211 requires having the scan cleared
	 * before restarting.
	 * We'll reset the scan_status to NONE in restart cleanup in
	 * the next start() call from mac80211. If restart isn't called
	 * (no fw restart) scan status will stay busy.
	 */
	iwl_mvm_report_scan_aborted(mvm);

	/*
	 * If we're restarting already, don't cycle restarts.
	 * If INIT fw asserted, it will likely fail again.
	 * If WoWLAN fw asserted, don't restart either, mac80211
	 * can't recover this since we're already half suspended.
	 */
	if (!mvm->restart_fw && fw_error) {
		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
					    NULL);
	} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
				    &mvm->status)) {
		struct iwl_mvm_reprobe *reprobe;

		IWL_ERR(mvm,
			"Firmware error during reconfiguration - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(mvm, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = mvm->trans->dev;
		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
		schedule_work(&reprobe->work);
	} else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
		/* don't let the transport/FW power down */
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

		if (fw_error && mvm->restart_fw > 0)
			mvm->restart_fw--;
		ieee80211_restart_hw(mvm->hw);
	}
}

static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_dump_nic_error_log(mvm);

	iwl_mvm_nic_restart(mvm, true);
}
*op_mode
)
1071 struct iwl_mvm
*mvm
= IWL_OP_MODE_GET_MVM(op_mode
);
1074 iwl_mvm_nic_restart(mvm
, true);
1077 struct iwl_d0i3_iter_data
{
1078 struct iwl_mvm
*mvm
;
1079 struct ieee80211_vif
*connected_vif
;
1083 bool disable_offloading
;

static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct iwl_d0i3_iter_data *iter_data)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvmsta;
	u32 available_tids = 0;
	u8 tid;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
		    mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
		return false;

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		return false;

	mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
	spin_lock_bh(&mvmsta->lock);
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		/*
		 * in case of pending tx packets, don't use this tid
		 * for offloading in order to prevent reuse of the same
		 * qos seq counters.
		 */
		if (iwl_mvm_tid_queued(tid_data))
			continue;

		if (tid_data->state != IWL_AGG_OFF)
			continue;

		available_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	/*
	 * disallow protocol offloading if we have no available tid
	 * (with no pending frames and no active aggregation,
	 * as we don't handle "holes" properly - the scheduler needs the
	 * frame's seq number and TFD index to match)
	 */
	if (!available_tids)
		return true;

	/* for simplicity, just use the first available tid */
	iter_data->offloading_tid = ffs(available_tids) - 1;
	return false;
}
, u8
*mac
,
1139 struct ieee80211_vif
*vif
)
1141 struct iwl_d0i3_iter_data
*data
= _data
;
1142 struct iwl_mvm
*mvm
= data
->mvm
;
1143 struct iwl_mvm_vif
*mvmvif
= iwl_mvm_vif_from_mac80211(vif
);
1144 u32 flags
= CMD_ASYNC
| CMD_HIGH_PRIO
| CMD_SEND_IN_IDLE
;
1146 IWL_DEBUG_RPM(mvm
, "entering D0i3 - vif %pM\n", vif
->addr
);
1147 if (vif
->type
!= NL80211_IFTYPE_STATION
||
1148 !vif
->bss_conf
.assoc
)
1152 * in case of pending tx packets or active aggregations,
1153 * avoid offloading features in order to prevent reuse of
1154 * the same qos seq counters.
1156 if (iwl_mvm_disallow_offloading(mvm
, vif
, data
))
1157 data
->disable_offloading
= true;
1159 iwl_mvm_update_d0i3_power_mode(mvm
, vif
, true, flags
);
1160 iwl_mvm_send_proto_offload(mvm
, vif
, data
->disable_offloading
,
1164 * on init/association, mvm already configures POWER_TABLE_CMD
1165 * and REPLY_MCAST_FILTER_CMD, so currently don't
1166 * reconfigure them (we might want to use different
1167 * params later on, though).
1169 data
->ap_sta_id
= mvmvif
->ap_sta_id
;
1173 * no new commands can be sent at this stage, so it's safe
1174 * to save the vif pointer during d0i3 entrance.
1176 data
->connected_vif
= vif
;

static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
				    struct iwl_wowlan_config_cmd *cmd,
				    struct iwl_d0i3_iter_data *iter_data)
{
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvm_ap_sta;

	if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
		return;

	rcu_read_lock();

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		goto out;

	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
	cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
	cmd->offloading_tid = iter_data->offloading_tid;
	cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
		ENABLE_DHCP_FILTERING;
	/*
	 * The d0i3 uCode takes care of the nonqos counters,
	 * so configure only the qos seq ones.
	 */
	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);

out:
	rcu_read_unlock();
}
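
/*
 * D0i3 entry: iterate the associated station vifs to pick offloading
 * parameters, flush the Tx queues, send WOWLAN_CONFIGURATION if a single
 * connected vif was found, and finally send D3_CONFIG_CMD so the firmware
 * takes over.
 */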
int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
	int ret;
	struct iwl_d0i3_iter_data d0i3_iter_data = {
		.mvm = mvm,
	};
	struct iwl_wowlan_config_cmd wowlan_config_cmd = {
		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
					     IWL_WOWLAN_WAKEUP_BEACON_MISS |
					     IWL_WOWLAN_WAKEUP_LINK_CHANGE |
					     IWL_WOWLAN_WAKEUP_BCN_FILTERING),
	};
	struct iwl_d3_manager_config d3_cfg_cmd = {
		.min_sleep_time = cpu_to_le32(1000),
		.wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
	};

	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");

	if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR))
		return -EINVAL;

	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);

	/*
	 * iwl_mvm_ref_sync takes a reference before checking the flag.
	 * so by checking there is no held reference we prevent a state
	 * in which iwl_mvm_ref_sync continues successfully while we
	 * configure the firmware to enter d0i3
	 */
	if (iwl_mvm_ref_taken(mvm)) {
		IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
		clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
		wake_up(&mvm->d0i3_exit_waitq);
		return 1;
	}

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_enter_d0i3_iterator,
						   &d0i3_iter_data);
	if (d0i3_iter_data.vif_count == 1) {
		mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
		mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
	} else {
		WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
		mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
		mvm->d0i3_offloading = false;
	}

	/* make sure we have no running tx while configuring the seqno */
	synchronize_net();

	/* Flush the hw queues, in case something got queued during entry */
	ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm), flags);
	if (ret)
		return ret;

	/* configure wowlan configuration only if needed */
	if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
		iwl_mvm_wowlan_config_key_params(mvm,
						 d0i3_iter_data.connected_vif,
						 true, flags);

		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
					&d0i3_iter_data);

		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
					   sizeof(wowlan_config_cmd),
					   &wowlan_config_cmd);
		if (ret)
			return ret;
	}

	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
				    flags | CMD_MAKE_TRANS_IDLE,
				    sizeof(d3_cfg_cmd), &d3_cfg_cmd);
}

static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = _data;
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;

	IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
}

struct iwl_mvm_d0i3_exit_work_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_wowlan_status *status;
	u32 wakeup_reasons;
};

static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 reasons = data->wakeup_reasons;

	/* consider only the relevant station interface */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
	    data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
		return;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
		iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
	else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
		ieee80211_beacon_loss(vif);

	iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
}

void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
{
	struct ieee80211_sta *sta = NULL;
	struct iwl_mvm_sta *mvm_ap_sta;
	int i;
	bool wake_queues = false;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->d0i3_tx_lock);

	if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
		goto out;

	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");

	/* get the sta in order to update seq numbers and re-enqueue skbs */
	sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
			lockdep_is_held(&mvm->mutex));

	if (IS_ERR_OR_NULL(sta)) {
		sta = NULL;
		goto out;
	}

	if (mvm->d0i3_offloading && qos_seq) {
		/* update qos seq numbers if offloading was enabled */
		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
			u16 seq = le16_to_cpu(qos_seq[i]);
			/* firmware stores last-used one, we store next one */
			seq += 0x10;
			mvm_ap_sta->tid_data[i].seq_number = seq;
		}
	}
out:
	/* re-enqueue (or drop) all packets */
	while (!skb_queue_empty(&mvm->d0i3_tx)) {
		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);

		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);

		/* if the skb_queue is not empty, we need to wake queues */
		wake_queues = true;
	}
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
	wake_up(&mvm->d0i3_exit_waitq);
	mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
	if (wake_queues)
		ieee80211_wake_queues(mvm->hw);

	spin_unlock_bh(&mvm->d0i3_tx_lock);
}

static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
	struct iwl_host_cmd get_status_cmd = {
		.id = WOWLAN_GET_STATUSES,
		.flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
	};
	struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
		.mvm = mvm,
	};
	struct iwl_wowlan_status *status;
	int ret;
	u32 wakeup_reasons = 0;
	__le16 *qos_seq = NULL;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
	if (ret)
		goto out;

	if (!get_status_cmd.resp_pkt)
		goto out;

	status = (void *)get_status_cmd.resp_pkt->data;
	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
	qos_seq = status->qos_seq_ctr;

	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);

	iter_data.wakeup_reasons = wakeup_reasons;
	iter_data.status = status;
	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_d0i3_exit_work_iter,
					    &iter_data);
out:
	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);

	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
		       wakeup_reasons);

	/* qos_seq might point inside resp_pkt, so free it only now */
	if (get_status_cmd.resp_pkt)
		iwl_free_resp(&get_status_cmd);

	/* the FW might have updated the regdomain */
	iwl_mvm_update_changed_regdom(mvm);

	iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
	mutex_unlock(&mvm->mutex);
}

int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
{
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
		    CMD_WAKE_UP_TRANS;
	int ret;

	IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");

	if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR))
		return -EINVAL;

	mutex_lock(&mvm->d0i3_suspend_mutex);
	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
		IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);
		return 0;
	}
	mutex_unlock(&mvm->d0i3_suspend_mutex);

	ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
	if (ret)
		goto out;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_exit_d0i3_iterator,
						   mvm);
out:
	schedule_work(&mvm->d0i3_exit_work);
	return ret;
}

int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
	return _iwl_mvm_exit_d0i3(mvm);
}

#define IWL_MVM_COMMON_OPS					\
	/* these could be differentiated */			\
	.async_cb = iwl_mvm_async_cb,				\
	.queue_full = iwl_mvm_stop_sw_queue,			\
	.queue_not_full = iwl_mvm_wake_sw_queue,		\
	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
	.free_skb = iwl_mvm_free_skb,				\
	.nic_error = iwl_mvm_nic_error,				\
	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
	.nic_config = iwl_mvm_nic_config,			\
	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
	.exit_d0i3 = iwl_mvm_exit_d0i3,				\
	/* as we only register one, these MUST be common! */	\
	.start = iwl_op_mode_mvm_start,				\
	.stop = iwl_op_mode_mvm_stop
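
/*
 * The same common ops are shared by both op_mode flavours; they only differ
 * in their Rx entry points (single-queue vs. multi-queue RSS capable HW).
 */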
static const struct iwl_op_mode_ops iwl_mvm_ops = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx,
};

static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
			      struct napi_struct *napi,
			      struct iwl_rx_cmd_buffer *rxb,
			      unsigned int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
		iwl_mvm_rx_frame_release(mvm, rxb, queue);
	else
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}

static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx_mq,
	.rx_rss = iwl_mvm_rx_mq_rss,
};